# Only create plots if we have data
# Bar chart of correct vs. incorrect response counts
ggplot(pretest_data) +
  aes(x = Correct) +
  geom_bar(alpha = 0.5) +
  labs(title = "Distribution of Correct Responses")
# Correct/incorrect counts, dodged by Simplification and faceted by the
# position of the higher-order operator
ggplot(pretest_data) +
  aes(x = Correct, fill = Simplification) +
  geom_bar(alpha = 0.5, position = "dodge") +
  facet_wrap(~HOO_Position) +
  labs(title = "Distribution by Higher-Order-Operator Position and Simplification Option") +
  theme_minimal()
# Histogram of pretest math performance (MP) across students.
# geom_histogram() plots counts by default, so the y axis is labelled
# "Count" — the previous "Density" label was misleading.
ggplot(student_data, aes(x = Pre_MP)) +
  geom_histogram(alpha = 0.5, fill = "blue", bins = 12) +
  labs(title = "Distribution of MP",
       x = "Math performance",
       y = "Count")
## Make simplification-level dataset
# One row per participant (pid) x Simplification condition, with mean
# correctness (Pre_MP) and mean response time (Pre_Time).
# NOTE(review): summarise() drops only the last grouping level, so the
# result is still grouped by pid — harmless for the plots below.
pretest_mean_data_if_simp <- pretest_data %>%
group_by (pid, Simplification) %>%
summarise(Pre_MP = mean(Correct),
Pre_Time = mean(trialElapsedTime))
# Plot correctness distribution by Simplification
# geom_histogram() plots counts, so the y axis is labelled "Count"
# (the previous "Density" label was misleading).
ggplot(pretest_mean_data_if_simp, aes(x = Pre_MP, fill = Simplification)) +
  geom_histogram(alpha = 0.5, bins = 12) +
  facet_wrap(~Simplification) +
  labs(title = "Distribution of Pretest Correctness by Simplification",
       x = "Mean Correctness",
       y = "Count")
## Make HOO-position-level dataset
# One row per participant x higher-order-operator position, with mean
# correctness and mean response time (parallel to the Simplification set).
pretest_mean_data_if_HOO <- pretest_data %>%
group_by (pid, HOO_Position) %>%
summarise(Pre_MP = mean(Correct),
Pre_Time = mean(trialElapsedTime))
# Plot correctness distribution by HOO_Position
# geom_histogram() plots counts, so the y axis is labelled "Count"
# (the previous "Density" label was misleading).
ggplot(pretest_mean_data_if_HOO, aes(x = Pre_MP, fill = HOO_Position)) +
  geom_histogram(alpha = 0.5, bins = 12) +
  facet_wrap(~HOO_Position) +
  labs(title = "Distribution of Pretest Correctness by HOO_Position",
       x = "Mean Correctness",
       y = "Count")
##
## 1 1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 2 2.1 2.2 2.3 2.4 2.5 2.6 2.7 2.8 2.9
## 7 1 2 1 2 3 3 4 2 4 7 5 1 5 4 8 4 3 8 4
## 3 3.1 3.2 3.3 3.4 3.5 3.6 3.8 3.9 4 4.2 4.4 4.5 4.6 4.7 5
## 4 4 4 3 2 5 2 2 2 3 1 1 2 1 1 2
##
## Low MA Medium MA High MA
## 59 41 17
##
## Low MA Medium MA High MA
## 92 15 10
##
## Low MA Medium MA High MA
## 53 42 22
##
## Low MA Medium MA High MA
## 37 33 47
# Jittered scatter of pretest performance against math anxiety
ggplot(student_data) +
  aes(x = MA, y = Pre_MP) +
  geom_jitter()
# Mean arousal and valence across the three pretest probes (items 3, 7, 12)
student_data <- student_data %>%
  mutate(
    arousal_Pretest_mean = (arousal_Pretest_3 + arousal_Pretest_7 +
                              arousal_Pretest_12) / 3,
    valence_Pretest_mean = (valence_Pretest_3 + valence_Pretest_7 +
                              valence_Pretest_12) / 3
  )
## MA, MP and time
# Keep only the quantitative variables used in the descriptives table
quant_vars <- c("MA", "Pre_MP", "Pre_Time",
                "arousal_Pretest_mean", "valence_Pretest_mean")
data_quant <- student_data[, colnames(student_data) %in% quant_vars]
# Write an APA-style correlation matrix (M, SD, r with 95% CIs) to Word
apa.cor.table(
  data = data_quant,
  filename = "Descriptives.doc",
  table.number = 1,
  show.conf.interval = TRUE,
  show.sig.stars = TRUE,
  landscape = TRUE
)
##
##
## Table 1
##
## Means, standard deviations, and correlations with confidence intervals
##
##
## Variable M SD 1 2 3
## 1. MA 2.61 0.95
##
## 2. Pre_MP 0.59 0.34 -.43**
## [-.57, -.27]
##
## 3. Pre_Time 38.47 18.67 .17 -.17
## [-.01, .34] [-.34, .01]
##
## 4. arousal_Pretest_mean 4.91 2.05 .54** -.35** -.00
## [.39, .65] [-.50, -.18] [-.19, .18]
##
## 5. valence_Pretest_mean 4.35 1.88 -.29** .16 .04
## [-.44, -.11] [-.03, .33] [-.15, .22]
##
## 4
##
##
##
##
##
##
##
##
##
##
##
## -.22*
## [-.39, -.04]
##
##
## Note. M and SD are used to represent mean and standard deviation, respectively.
## Values in square brackets indicate the 95% confidence interval.
## The confidence interval is a plausible range of population correlations
## that could have caused the sample correlation (Cumming, 2014).
## * indicates p < .05. ** indicates p < .01.
##
### --- Z-scoring for clustering
# Z-score MP and MA (mean 0, sd 1) so both variables contribute equally
# to the k-means distance; scale() computes (x - mean(x)) / sd(x).
student_data$Pre_MP_z <- as.numeric(scale(student_data$Pre_MP))
student_data$MA_z <- as.numeric(scale(student_data$MA))
# Data frame holding only the two scaled clustering variables
PRE_z <- student_data %>% as.data.frame() %>%
  dplyr::select(MA_z, Pre_MP_z)
### --- How many clusters - Elbow method (widely used, recommended)
# Within-cluster sum of squares for candidate k; vertical line marks k = 3
fviz_nbclust(PRE_z, kmeans, method = "wss") +
geom_vline(xintercept = 3, linetype = 2)+
labs(subtitle = "Elbow method")
# Average silhouette width for k = 3..10 clusters.
# Initialise with NA rather than 0: silhouette widths can be negative, so a
# leftover 0 at the untested positions 1-2 could otherwise win which.max().
silhouette_scores <- rep(NA_real_, 10)
# Loop through different numbers of clusters
for (k in 3:10) {
  set.seed(123) # For reproducibility
  kmeans_result <- kmeans(PRE_z, centers = k)
  sil <- silhouette(kmeans_result$cluster, dist(PRE_z))
  silhouette_scores[k] <- mean(sil[, 3]) # Average Silhouette score for this k
}
# Find the number of clusters with the highest average Silhouette score
# (which.max ignores NA, so only the tested k values compete)
best_k <- which.max(silhouette_scores)
cat("The optimal number of clusters is", best_k, "with an average Silhouette score of", silhouette_scores[best_k], "\n")
## The optimal number of clusters is 3 with an average Silhouette score of 0.4035283
# Plot the Silhouette scores for each number of clusters
# (only the tested range 3..10 is plotted)
plot(3:10, silhouette_scores[3:10], type = "b",
xlab = "Number of Clusters", ylab = "Average Silhouette Score",
main = "Silhouette Score for Different Numbers of Clusters")
# Jittered scatter of MP vs. MA, colored by k-means cluster assignment.
# NOTE(review): `col =` is set outside aes() with a raw vector, which
# bypasses ggplot's legend and relies on row order; mapping a factor inside
# aes() would be conventional. Also verify the column name — later code
# uses `pre_cluster_groups`, not `pre_cluster_results`.
ggplot(student_data, aes(MA, Pre_MP)) +
geom_jitter(col = student_data$pre_cluster_results)
## # A tibble: 3 × 6
## pre_cluster_groups PreMP_mean PreMP_sd PreMA_mean PreMA_sd n
## <chr> <dbl> <dbl> <dbl> <dbl> <int>
## 1 hMP_lMA 0.765 0.187 1.61 0.393 39
## 2 hMP_mMA 0.79 0.143 2.92 0.565 45
## 3 lMP_mMA 0.126 0.161 3.37 0.817 33
# Create a combined visualization with facets for each variable and cluster
# as.numeric() strips scale()'s matrix class so the new columns are plain
# numeric vectors — scale() alone stores 1-column matrices inside the data
# frame, which can confuse pivot_longer()/ggplot downstream.
student_data <- student_data %>% mutate(Pre_MP_scaled = as.numeric(scale(Pre_MP)),
                                        MA_scaled = as.numeric(scale(MA)))
# Long format: one row per student x scaled variable for faceted plotting
student_data_long_subscales <- pivot_longer(student_data,
                                            cols = c('MA_scaled', 'Pre_MP_scaled'),
                                            names_to = 'Variable',
                                            values_to = 'Value')
# Boxplots for MP and MA (both z-scaled) within each cluster.
# Fixes vs. the original: alpha is a fixed aesthetic, so it is set on the
# layers instead of mapped inside aes() (mapping it created a spurious
# "0.5" legend entry); the plain geom_point() layer duplicated the jittered
# points; geom_smooth(method = "lm") on a discrete x axis drew nothing
# meaningful — both removed.
ggplot(student_data_long_subscales, aes(x = Variable, y = Value, fill = Variable)) +
  geom_boxplot(alpha = 0.5) +
  geom_jitter(alpha = 0.5) +
  labs(x = "Cluster",
       y = "Value") +
  facet_wrap(~ pre_cluster_groups, scales = "fixed") +
  theme_minimal() +
  theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
| Characteristic | hMP_lMA, N = 39¹ | hMP_mMA, N = 45¹ | lMP_mMA, N = 33¹ |
|---|---|---|---|
| gender | |||
|     1 | 18 (46%) | 18 (40%) | 9 (27%) |
|     2 | 15 (38%) | 23 (51%) | 23 (70%) |
|     3 | 3 (7.7%) | 4 (8.9%) | 1 (3.0%) |
|     4 | 3 (7.7%) | 0 (0%) | 0 (0%) |
| race_ethnicity | |||
|     1 | 25 (64%) | 26 (58%) | 20 (61%) |
|     2 | 3 (7.7%) | 2 (4.4%) | 5 (15%) |
|     3 | 1 (2.6%) | 1 (2.2%) | 1 (3.0%) |
|     5 | 9 (23%) | 13 (29%) | 5 (15%) |
|     6 | 1 (2.6%) | 3 (6.7%) | 1 (3.0%) |
|     7 | 0 (0%) | 0 (0%) | 1 (3.0%) |
| age | |||
|     17 | 1 (2.6%) | 1 (2.2%) | 0 (0%) |
|     18 | 11 (28%) | 13 (29%) | 7 (21%) |
|     19 | 11 (28%) | 12 (27%) | 11 (33%) |
|     20 | 9 (23%) | 10 (22%) | 4 (12%) |
|     21 | 6 (15%) | 6 (13%) | 7 (21%) |
|     22 | 1 (2.6%) | 3 (6.7%) | 3 (9.1%) |
|     34 | 0 (0%) | 0 (0%) | 1 (3.0%) |
| ¹ n (%) | |||
## [1] "Ages in clusters"
## # A tibble: 3 × 3
## pre_cluster_groups mean sd
## <chr> <dbl> <dbl>
## 1 hMP_lMA 19.3 1.19
## 2 hMP_mMA 19.4 1.28
## 3 lMP_mMA 20.1 2.82
## MP comparison
# Kruskal-Wallis omnibus test plus Dunn's post-hoc pairwise comparisons
# (Bonferroni-adjusted) of pretest MP across the three clusters
dunn.test(student_data$Pre_MP, g=student_data$pre_cluster_groups, method='bonferroni')
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 69.6177, df = 2, p-value = 0
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -0.323592
## | 1.0000
## |
## lMP_mMA | 7.081554 7.617284
## | 0.0000* 0.0000*
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Boxplot of pretest MP per cluster with jittered raw data points overlaid
ggplot(student_data) +
  aes(x = pre_cluster_groups, y = Pre_MP, fill = pre_cluster_groups) +
  geom_boxplot(alpha = 0.7) +
  geom_jitter(width = 0.2, alpha = 0.4) +
  theme_minimal() +
  labs(title = "Math Performance Comparison between hMP_lMA and hMP_mMA Groups",
       x = "Group",
       y = "Pretest Math Performance") +
  theme(legend.position = "none") +
  scale_fill_brewer(palette = "Set2")
# Robust (HC2) simple regression of pretest MP on math anxiety.
# (Comment corrected: there is no interaction term in this model.)
model <- lm_robust(Pre_MP ~ MA, data = student_data)
summary(model)
##
## Call:
## lm_robust(formula = Pre_MP ~ MA, data = student_data)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|) CI Lower CI Upper DF
## (Intercept) 0.9916 0.06693 14.815 1.986e-28 0.859 1.12419 115
## MA -0.1523 0.02663 -5.717 8.682e-08 -0.205 -0.09952 115
##
## Multiple R-squared: 0.1836 , Adjusted R-squared: 0.1765
## F-statistic: 32.69 on 1 and 115 DF, p-value: 8.682e-08
## MA comparison
# Kruskal-Wallis + Dunn's post-hoc (Bonferroni) for math anxiety across clusters
dunn.test(student_data$MA, g=student_data$pre_cluster_groups, method='bonferroni')
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 78.6595, df = 2, p-value = 0
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -6.982901
## | 0.0000*
## |
## lMP_mMA | -8.209599 -1.806754
## | 0.0000* 0.1062
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Create boxplot to visualize the difference
# Math anxiety per cluster; note the title mentions two groups, but all
# three clusters are plotted.
ggplot(student_data,
aes(x = pre_cluster_groups, y = MA, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Math Anxiety",
title = "Math Anxiety Comparison between hMP_lMA and hMP_mMA Groups") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Distribution check for arousal at the initial (PrePre) measurement
hist(student_data$arousal_PrePre)
## Arousal comparison
# Kruskal-Wallis + Dunn's post-hoc (Bonferroni) across clusters
dunn.test(student_data$arousal_PrePre, g=student_data$pre_cluster_groups, method='bonferroni')
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 19.7691, df = 2, p-value = 0
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -1.651629
## | 0.1479
## |
## lMP_mMA | -4.404071 -2.968510
## | 0.0000* 0.0045*
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Mean and SD of PrePre arousal per cluster.
# NOTE(review): the output columns are named "arousal_Pretest_mean_*" but
# the variable summarised is arousal_PrePre — consider renaming for clarity.
student_data %>%
group_by(pre_cluster_groups) %>%
summarise(arousal_Pretest_mean_mean = mean(arousal_PrePre),
arousal_Pretest_mean_sd = sd(arousal_PrePre))
## # A tibble: 3 × 3
## pre_cluster_groups arousal_Pretest_mean_mean arousal_Pretest_mean_sd
## <chr> <dbl> <dbl>
## 1 hMP_lMA 3.03 2.01
## 2 hMP_mMA 3.78 1.91
## 3 lMP_mMA 5.33 2.10
# Create boxplot to visualize the difference
# PrePre arousal per cluster (all three clusters shown despite the
# two-group title)
ggplot(student_data,
aes(x = pre_cluster_groups, y = arousal_PrePre, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Mean PrePre Arousal",
title = "Mean Pretest Arousal Comparison between hMP_lMA and hMP_mMA Groups") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Modeling with lm_robust()
# Robust (HC2) regression of log arousal on standardized MA, standardized
# Pre_MP and their interaction. log() assumes arousal_PrePre > 0 — TODO confirm.
model <- lm_robust(log(arousal_PrePre) ~ scale(MA) * scale(Pre_MP), data = student_data)
summary(model)
##
## Call:
## lm_robust(formula = log(arousal_PrePre) ~ scale(MA) * scale(Pre_MP),
## data = student_data)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|) CI Lower CI Upper
## (Intercept) 1.18242 0.06083 19.4367 7.828e-38 1.0619 1.30294
## scale(MA) 0.25766 0.06075 4.2416 4.562e-05 0.1373 0.37800
## scale(Pre_MP) -0.04941 0.06243 -0.7915 4.303e-01 -0.1731 0.07427
## scale(MA):scale(Pre_MP) -0.02244 0.05729 -0.3918 6.960e-01 -0.1359 0.09106
## DF
## (Intercept) 113
## scale(MA) 113
## scale(Pre_MP) 113
## scale(MA):scale(Pre_MP) 113
##
## Multiple R-squared: 0.1915 , Adjusted R-squared: 0.1701
## F-statistic: 10.66 on 3 and 113 DF, p-value: 3.151e-06
# Plotting by continuous
# PrePre arousal vs. MA, colored by binned pretest performance.
# alpha is fixed, so it is set on the layers instead of mapped in aes()
# (mapping it created a spurious legend entry).
# NOTE(review): the color bins use round(Pre_MP/2, 1) — confirm the /2 is
# intended; other plots use an MP_level factor instead.
ggplot(student_data, aes(x = MA, y = arousal_PrePre, color = as.factor(round(Pre_MP/2, 1)))) +
  geom_point(alpha = 0.7) +
  geom_smooth(method = "lm", level = 0.95) +
  geom_jitter(alpha = 0.7) +
  labs(x = "Math anxiety",
       y = "arousal_PrePre",
       color = "MP_level") +
  theme_minimal() +
  theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
# Lm_robust with MA
# Robust (HC2) simple regression: PrePre arousal on math anxiety
model <- lm_robust(arousal_PrePre ~ MA, data = student_data)
summary(model)
##
## Call:
## lm_robust(formula = arousal_PrePre ~ MA, data = student_data)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|) CI Lower CI Upper DF
## (Intercept) 1.341 0.5390 2.489 1.426e-02 0.2737 2.409 115
## MA 1.006 0.2019 4.985 2.207e-06 0.6065 1.406 115
##
## Multiple R-squared: 0.191 , Adjusted R-squared: 0.184
## F-statistic: 24.85 on 1 and 115 DF, p-value: 2.207e-06
# Modeling using interaction
# Robust (HC2) regression of PrePre arousal on MA, Pre_MP and MA x Pre_MP
# (unstandardized predictors, unlike the scale() version above)
model <- lm_robust(arousal_PrePre ~ MA*Pre_MP, data = student_data)
summary(model)
##
## Call:
## lm_robust(formula = arousal_PrePre ~ MA * Pre_MP, data = student_data)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|) CI Lower CI Upper DF
## (Intercept) 0.896 1.5564 0.5757 0.565963 -2.1875 3.9795 113
## MA 1.273 0.4786 2.6593 0.008967 0.3246 2.2210 113
## Pre_MP 1.161 2.0449 0.5677 0.571379 -2.8905 5.2123 113
## MA:Pre_MP -0.664 0.6719 -0.9883 0.325137 -1.9951 0.6671 113
##
## Multiple R-squared: 0.2096 , Adjusted R-squared: 0.1887
## F-statistic: 9.201 on 3 and 113 DF, p-value: 1.697e-05
# Distribution check for mean pretest arousal (average of probes 3, 7, 12)
hist(student_data$arousal_Pretest_mean)
## Arousal comparison
# Kruskal-Wallis + Dunn's post-hoc (Bonferroni) across clusters
dunn.test(student_data$arousal_Pretest_mean, g=student_data$pre_cluster_groups, method='bonferroni')
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 21.8693, df = 2, p-value = 0
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -1.942179
## | 0.0782
## |
## lMP_mMA | -4.657477 -2.952677
## | 0.0000* 0.0047*
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Mean and SD of mean pretest arousal per cluster
student_data %>%
group_by(pre_cluster_groups) %>%
summarise(arousal_Pretest_mean_mean = mean(arousal_Pretest_mean),
arousal_Pretest_mean_sd = sd(arousal_Pretest_mean))
## # A tibble: 3 × 3
## pre_cluster_groups arousal_Pretest_mean_mean arousal_Pretest_mean_sd
## <chr> <dbl> <dbl>
## 1 hMP_lMA 3.97 1.85
## 2 hMP_mMA 4.73 1.71
## 3 lMP_mMA 6.25 2.02
# Create boxplot to visualize the difference
# Mean pretest arousal per cluster with jittered raw points
ggplot(student_data,
aes(x = pre_cluster_groups, y = arousal_Pretest_mean, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Mean Pretest Arousal",
title = "Mean Pretest Arousal Comparison between hMP_lMA and hMP_mMA Groups") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Modeling with lm_robust()
# Robust (HC2) regression of mean pretest arousal on standardized MA,
# standardized Pre_MP and their interaction
model <- lm_robust(arousal_Pretest_mean ~ scale(MA) * scale(Pre_MP), data = student_data)
summary(model)
##
## Call:
## lm_robust(formula = arousal_Pretest_mean ~ scale(MA) * scale(Pre_MP),
## data = student_data)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|) CI Lower CI Upper
## (Intercept) 4.8399 0.1701 28.4517 2.402e-53 4.5028 5.1769
## scale(MA) 0.9581 0.1664 5.7590 7.407e-08 0.6285 1.2878
## scale(Pre_MP) -0.2614 0.1877 -1.3926 1.665e-01 -0.6334 0.1105
## scale(MA):scale(Pre_MP) -0.1557 0.1702 -0.9148 3.622e-01 -0.4928 0.1814
## DF
## (Intercept) 113
## scale(MA) 113
## scale(Pre_MP) 113
## scale(MA):scale(Pre_MP) 113
##
## Multiple R-squared: 0.3113 , Adjusted R-squared: 0.2931
## F-statistic: 18.42 on 3 and 113 DF, p-value: 8.489e-10
# Plotting by continuous
# Mean pretest arousal vs. MA, colored by performance level.
# alpha is fixed, so it is set on the layers instead of mapped in aes()
# (mapping it created a spurious legend entry).
ggplot(student_data, aes(x = MA, y = arousal_Pretest_mean, color = as.factor(MP_level))) +
  geom_point(alpha = 0.7) +
  geom_smooth(method = "lm", level = 0.95) +
  geom_jitter(alpha = 0.7) +
  labs(x = "Math anxiety",
       y = "arousal_Pretest_mean",
       color = "MP_level") +
  theme_minimal() +
  theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
# Lm_robust with MA
# Robust (HC2) simple regression: mean pretest arousal on math anxiety
model <- lm_robust(arousal_Pretest_mean ~ MA, data = student_data)
summary(model)
##
## Call:
## lm_robust(formula = arousal_Pretest_mean ~ MA, data = student_data)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|) CI Lower CI Upper DF
## (Intercept) 1.88 0.4472 4.204 5.208e-05 0.9943 2.766 115
## MA 1.16 0.1655 7.011 1.729e-10 0.8325 1.488 115
##
## Multiple R-squared: 0.2885 , Adjusted R-squared: 0.2823
## F-statistic: 49.15 on 1 and 115 DF, p-value: 1.729e-10
# Modeling using interaction
# Robust (HC2) regression of mean pretest arousal on MA, Pre_MP and
# MA x Pre_MP (unstandardized predictors)
model <- lm_robust(arousal_Pretest_mean ~ MA*Pre_MP, data = student_data)
summary(model)
##
## Call:
## lm_robust(formula = arousal_Pretest_mean ~ MA * Pre_MP, data = student_data)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|) CI Lower CI Upper DF
## (Intercept) 1.9067 1.0590 1.8006 0.074436 -0.1912 4.0047 113
## MA 1.3019 0.3280 3.9694 0.000127 0.6521 1.9517 113
## Pre_MP 0.4965 1.5638 0.3175 0.751444 -2.6016 3.5947 113
## MA:Pre_MP -0.4883 0.5337 -0.9148 0.362233 -1.5456 0.5691 113
##
## Multiple R-squared: 0.3113 , Adjusted R-squared: 0.2931
## F-statistic: 18.42 on 3 and 113 DF, p-value: 8.489e-10
# Distribution and normality check for PrePre valence
hist(student_data$valence_PrePre)
shapiro.test(student_data$valence_PrePre)
##
## Shapiro-Wilk normality test
##
## data: student_data$valence_PrePre
## W = 0.96145, p-value = 0.001957
## Valence comparison (comment corrected: this tests valence, not arousal)
# Kruskal-Wallis + Dunn's post-hoc (Bonferroni) across clusters
dunn.test(student_data$valence_PrePre, g=student_data$pre_cluster_groups, method='bonferroni')
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 3.4769, df = 2, p-value = 0.18
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | 1.657037
## | 0.1463
## |
## lMP_mMA | 1.563768 0.032067
## | 0.1768 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Mean and SD of PrePre valence per cluster.
# NOTE(review): the output columns are named "valence_Pretest_mean_*" but
# the variable summarised is valence_PrePre — consider renaming for clarity.
student_data %>%
group_by(pre_cluster_groups) %>%
summarise(valence_Pretest_mean_mean = mean(valence_PrePre),
valence_Pretest_mean_sd = sd(valence_PrePre))
## # A tibble: 3 × 3
## pre_cluster_groups valence_Pretest_mean_mean valence_Pretest_mean_sd
## <chr> <dbl> <dbl>
## 1 hMP_lMA 5.44 1.89
## 2 hMP_mMA 4.84 1.71
## 3 lMP_mMA 4.67 2.03
# Create boxplot to visualize the difference
# PrePre valence per cluster with jittered raw points
ggplot(student_data,
aes(x = pre_cluster_groups, y = valence_PrePre, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Mean Pretest Valence",
title = "Valence Comparison between hMP_lMA and hMP_mMA Groups") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Modeling with lm_robust()
# Robust (HC2) regression of PrePre valence on standardized MA,
# standardized Pre_MP and their interaction
model <- lm_robust(valence_PrePre ~ scale(MA) * scale(Pre_MP), data = student_data)
summary(model)
##
## Call:
## lm_robust(formula = valence_PrePre ~ scale(MA) * scale(Pre_MP),
## data = student_data)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|) CI Lower CI Upper
## (Intercept) 5.1000 0.1714 29.7633 2.674e-55 4.76055 5.4395
## scale(MA) -0.5845 0.1653 -3.5355 5.912e-04 -0.91211 -0.2570
## scale(Pre_MP) -0.1517 0.1716 -0.8837 3.787e-01 -0.49169 0.1883
## scale(MA):scale(Pre_MP) 0.2556 0.1610 1.5874 1.152e-01 -0.06339 0.5745
## DF
## (Intercept) 113
## scale(MA) 113
## scale(Pre_MP) 113
## scale(MA):scale(Pre_MP) 113
##
## Multiple R-squared: 0.1088 , Adjusted R-squared: 0.08518
## F-statistic: 5.457 on 3 and 113 DF, p-value: 0.001537
# Plotting by continuous
# PrePre valence vs. MA, colored by performance level.
# alpha is fixed, so it is set on the layers instead of mapped in aes()
# (mapping it created a spurious legend entry).
ggplot(student_data, aes(x = MA, y = valence_PrePre, color = as.factor(MP_level))) +
  geom_point(alpha = 0.7) +
  geom_smooth(method = "lm", level = 0.95) +
  geom_jitter(alpha = 0.7) +
  labs(x = "Math anxiety",
       y = "valence_PrePre",
       color = "MP_level") +
  theme_minimal() +
  theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
# Distribution check for mean pretest valence (average of probes 3, 7, 12)
hist(student_data$valence_Pretest_mean)
## Valence comparison (comment corrected: this tests valence, not arousal)
dunn.test(student_data$valence_Pretest_mean, g=student_data$pre_cluster_groups, method='bonferroni')
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 1.9908, df = 2, p-value = 0.37
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | 0.521674
## | 0.9028
## |
## lMP_mMA | 1.397220 0.943989
## | 0.2435 0.5178
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Mean and SD of mean pretest valence per cluster
student_data %>%
group_by(pre_cluster_groups) %>%
summarise(valence_Pretest_mean_mean = mean(valence_Pretest_mean),
valence_Pretest_mean_sd = sd(valence_Pretest_mean))
## # A tibble: 3 × 3
## pre_cluster_groups valence_Pretest_mean_mean valence_Pretest_mean_sd
## <chr> <dbl> <dbl>
## 1 hMP_lMA 4.63 1.91
## 2 hMP_mMA 4.36 1.71
## 3 lMP_mMA 4 2.06
# Create boxplot to visualize the difference
# Mean pretest valence per cluster with jittered raw points
ggplot(student_data,
aes(x = pre_cluster_groups, y = valence_Pretest_mean, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Mean Pretest Valence",
title = "Valence Comparison between hMP_lMA and hMP_mMA Groups") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Modeling with lm_robust()
# Robust (HC2) regression of mean pretest valence on standardized MA,
# standardized Pre_MP and their interaction
model <- lm_robust(valence_Pretest_mean ~ scale(MA) * scale(Pre_MP), data = student_data)
summary(model)
##
## Call:
## lm_robust(formula = valence_Pretest_mean ~ scale(MA) * scale(Pre_MP),
## data = student_data)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|) CI Lower CI Upper
## (Intercept) 4.36402 0.1725 25.2973 2.348e-48 4.0222 4.7058
## scale(MA) -0.50292 0.1829 -2.7495 6.952e-03 -0.8653 -0.1405
## scale(Pre_MP) 0.06518 0.1693 0.3849 7.010e-01 -0.2703 0.4007
## scale(MA):scale(Pre_MP) 0.03870 0.1663 0.2328 8.164e-01 -0.2907 0.3681
## DF
## (Intercept) 113
## scale(MA) 113
## scale(Pre_MP) 113
## scale(MA):scale(Pre_MP) 113
##
## Multiple R-squared: 0.08361 , Adjusted R-squared: 0.05928
## F-statistic: 3.084 on 3 and 113 DF, p-value: 0.03021
# Plotting by continuous
# Mean pretest valence vs. MA, colored by binned pretest performance.
# alpha is fixed, so it is set on the layers instead of mapped in aes()
# (mapping it created a spurious legend entry).
# NOTE(review): the color bins use round(Pre_MP/2, 1) but are labelled
# "MP_level" — confirm the /2 binning is intended.
ggplot(student_data, aes(x = MA, y = valence_Pretest_mean, color = as.factor(round(Pre_MP/2, 1)))) +
  geom_point(alpha = 0.7) +
  geom_smooth(method = "lm", level = 0.95) +
  geom_jitter(alpha = 0.7) +
  labs(x = "Math anxiety",
       y = "valence_Pretest_mean",
       color = "MP_level") +
  theme_minimal() +
  theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
# Distribution for response time
hist(student_data$Pre_Time)
hist(log(student_data$Pre_Time))
# Summary statistics for pretest response time.
# NOTE(review): `Count > 3*SD` compares raw times to 3 * sd, not to
# mean + 3 * sd; if the usual outlier rule is intended, the threshold
# should be mean(Pre_Time) + 3 * sd(Pre_Time) — confirm.
student_data %>%
summarise(
Minimum = min(Pre_Time, na.rm = TRUE),
Median = median(Pre_Time, na.rm = TRUE),
Mean = mean(Pre_Time, na.rm = TRUE),
`Standard Deviation` = sd(Pre_Time, na.rm = TRUE),
`Count > 3*SD` = sum(Pre_Time > 3 * sd(Pre_Time), na.rm = TRUE)
)
## # A tibble: 1 × 5
## Minimum Median Mean `Standard Deviation` `Count > 3*SD`
## <dbl> <dbl> <dbl> <dbl> <int>
## 1 12.6 32.5 38.5 18.7 19
# Plot time distribution for correct and incorrect responses
# Long format: one row per student x (correct/incorrect) mean-time column
student_data_long <- student_data %>%
pivot_longer(cols = c(Pre_Time_Correct, Pre_Time_Incorrect),
names_to = "Category",
values_to = "Time")
## Create density plot
# NOTE(review): `labels` is unnamed, so it pairs with the scale's break
# order (alphabetical here: Correct before Incorrect) — verify if the
# factor levels ever change.
ggplot(student_data_long, aes(x = Time, fill = Category)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Pretest Times",
x = "Time",
y = "Density") +
scale_fill_manual(values = c("Pre_Time_Correct" = "green",
"Pre_Time_Incorrect" = "red"),
labels = c("Correct Responses",
"Incorrect Responses")) +
theme_minimal()
## Warning: Removed 23 rows containing non-finite outside the scale range
## (`stat_density()`).
# Plot time distribution by Simplification
# (fixes the "Presense" -> "Presence" typo in the plot title)
ggplot(pretest_mean_data_if_simp, aes(x = Pre_Time, fill = Simplification)) +
  geom_density(alpha = 0.5) +
  labs(title = "Distribution of Pretest Time by Simplification Option Presence",
       x = "Mean Time",
       y = "Density")
# Density of mean pretest time, split by higher-order-operator position
ggplot(pretest_mean_data_if_HOO) +
  aes(x = Pre_Time, fill = HOO_Position) +
  geom_density(alpha = 0.5) +
  labs(title = "Distribution of Pretest Time by Higher-Order-Operator Position",
       x = "Mean Time",
       y = "Density")
# Density of mean pretest time, split by cluster
ggplot(student_data) +
  aes(x = Pre_Time, fill = pre_cluster_groups) +
  geom_density(alpha = 0.5) +
  labs(title = "Distribution of Pretest Time by cluster",
       x = "Mean Time",
       y = "Density")
# Density of mean pretest time on correct responses only, split by cluster
ggplot(student_data) +
  aes(x = Pre_Time_Correct, fill = pre_cluster_groups) +
  geom_density(alpha = 0.5) +
  labs(title = "Distribution of Pretest Time for Correct responses by cluster",
       x = "Mean Time",
       y = "Density")
## Warning: Removed 17 rows containing non-finite outside the scale range
## (`stat_density()`).
# Cluster sizes
student_data %>%
group_by (pre_cluster_groups) %>%
summarise(n = n())
## # A tibble: 3 × 2
## pre_cluster_groups n
## <chr> <int>
## 1 hMP_lMA 39
## 2 hMP_mMA 45
## 3 lMP_mMA 33
# Per-cluster count of students with at least one correct pretest response
print("Students with at least one correct response")
## [1] "Students with at least one correct response"
student_data %>%
filter(Pre_MP > 0) %>%
group_by (pre_cluster_groups) %>%
summarise(n = n())
## # A tibble: 3 × 2
## pre_cluster_groups n
## <chr> <int>
## 1 hMP_lMA 39
## 2 hMP_mMA 45
## 3 lMP_mMA 16
# Cross-tabulation of cluster membership vs. rounded pretest MP
print("Pretest MP values")
## [1] "Pretest MP values"
table(student_data$pre_cluster_groups, round(student_data$Pre_MP, 1))
##
## 0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1
## hMP_lMA 0 0 1 1 2 1 3 3 17 7 4
## hMP_mMA 0 0 0 0 0 4 3 5 16 15 2
## lMP_mMA 17 4 4 5 2 1 0 0 0 0 0
# Response-time comparison across clusters, correct responses only
print("In correct responses")
## [1] "In correct responses"
dunn.test(student_data$Pre_Time_Correct, g=student_data$pre_cluster_groups, method='bonferroni')
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 8.7397, df = 2, p-value = 0.01
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -2.829773
## | 0.0070*
## |
## lMP_mMA | -1.903222 0.185691
## | 0.0855 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Create boxplot to visualize the difference
# Correct-response time per cluster; NA rows (students with no correct
# responses) are dropped by ggplot with a warning.
ggplot(student_data,
aes(x = pre_cluster_groups, y = Pre_Time_Correct, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Response Time",
title = "Response Time in Correct Responses Comparison between hMP_lMA and hMP_mMA Groups") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
## Warning: Removed 17 rows containing non-finite outside the scale range
## (`stat_boxplot()`).
## Warning: Removed 17 rows containing missing values or values outside the scale range
## (`geom_point()`).
# Plots with mean and CI
# Per-cluster mean correct-response time with a 95% t-based CI.
# Bug fix: the SE denominator now counts only non-missing observations;
# n() also counted rows where Pre_Time_Correct is NA (students with no
# correct answers — 17 rows per the warnings above), which understated the
# standard error and narrowed the CIs.
summary_data <- student_data %>%
  group_by(pre_cluster_groups) %>%
  summarise(n_obs = sum(!is.na(Pre_Time_Correct)),
            mean_time = mean(Pre_Time_Correct, na.rm = TRUE),
            se = sd(Pre_Time_Correct, na.rm = TRUE) / sqrt(n_obs),
            ci_lower = mean_time - qt(0.975, df = n_obs - 1) * se,
            ci_upper = mean_time + qt(0.975, df = n_obs - 1) * se)
ggplot(summary_data, aes(x = pre_cluster_groups, y = mean_time, fill = pre_cluster_groups)) +
  geom_col(alpha = 0.7, width = 0.5) +
  geom_errorbar(aes(ymin = ci_lower, ymax = ci_upper), width = 0.2) +
  theme_minimal() +
  labs(x = "Group",
       y = "Mean Response Time",
       title = "Mean Response Time with 95% CI (Correct Responses)") +
  theme(legend.position = "none") +
  scale_fill_brewer(palette = "Set2")
#library(pwr)
#pwr.anova.test(k = number of groups, n = sample_size, f = effect_size, sig.level = sig_level)
#print(result)
## Time comparison
# Response-time comparison across clusters, all responses
print("In all responses")
## [1] "In all responses"
dunn.test(student_data$Pre_Time, g=student_data$pre_cluster_groups, method='bonferroni')
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 8.6718, df = 2, p-value = 0.01
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -2.503510
## | 0.0184*
## |
## lMP_mMA | -2.587356 -0.280407
## | 0.0145* 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Boxplot of per-student mean response time (all responses) by cluster,
# with jittered raw data points overlaid
ggplot(student_data) +
  aes(x = pre_cluster_groups, y = Pre_Time, fill = pre_cluster_groups) +
  geom_boxplot(alpha = 0.7) +
  geom_jitter(width = 0.2, alpha = 0.4) +
  theme_minimal() +
  labs(title = "Response Time in All Responses Comparison between hMP_lMA and hMP_mMA Groups",
       x = "Group",
       y = "Response Time") +
  theme(legend.position = "none") +
  scale_fill_brewer(palette = "Set2")
# Plots with mean and CI
# Per-cluster mean response time (all responses) with a 95% t-based CI.
# The SE denominator counts non-missing values so the CI stays correct
# even if Pre_Time contains NAs (when there are none, this equals n()).
summary_data <- student_data %>%
  group_by(pre_cluster_groups) %>%
  summarise(n_obs = sum(!is.na(Pre_Time)),
            mean_time = mean(Pre_Time, na.rm = TRUE),
            se = sd(Pre_Time, na.rm = TRUE) / sqrt(n_obs),
            ci_lower = mean_time - qt(0.975, df = n_obs - 1) * se,
            ci_upper = mean_time + qt(0.975, df = n_obs - 1) * se)
ggplot(summary_data, aes(x = pre_cluster_groups, y = mean_time, fill = pre_cluster_groups)) +
  geom_col(alpha = 0.7, width = 0.5) +
  geom_errorbar(aes(ymin = ci_lower, ymax = ci_upper), width = 0.2) +
  theme_minimal() +
  labs(x = "Group",
       y = "Mean Response Time",
       title = "Mean Response Time with 95% CI (All Responses)") +
  theme(legend.position = "none") +
  scale_fill_brewer(palette = "Set2")
# Modeling with lm()
# OLS of log correct-response time on standardized MA x Pre_MP; rows with
# NA in Pre_Time_Correct are dropped (17 observations per the output below).
model <- lm(log(Pre_Time_Correct) ~ scale(MA) * scale(Pre_MP), data = student_data)
summary(model)
##
## Call:
## lm(formula = log(Pre_Time_Correct) ~ scale(MA) * scale(Pre_MP),
## data = student_data)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.86353 -0.30780 -0.00944 0.23427 0.84679
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3.523894 0.046683 75.485 <2e-16 ***
## scale(MA) 0.099115 0.044632 2.221 0.0287 *
## scale(Pre_MP) -0.007994 0.059266 -0.135 0.8930
## scale(MA):scale(Pre_MP) 0.103989 0.052461 1.982 0.0503 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.4018 on 96 degrees of freedom
## (17 observations deleted due to missingness)
## Multiple R-squared: 0.1051, Adjusted R-squared: 0.07715
## F-statistic: 3.759 on 3 and 96 DF, p-value: 0.01336
# Residual diagnostics: Shapiro-Wilk for normality (p > .05 -> no evidence
# against normality), Breusch-Pagan for heteroscedasticity
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.98486, p-value = 0.3104
bptest(model) # if p > 0.05 -- no strong evidence of heteroscedasticity (OLS is okay)
##
## studentized Breusch-Pagan test
##
## data: model
## BP = 3.8905, df = 3, p-value = 0.2735
# Plotting by continuous
# Correct-response time vs. MA, colored by performance level.
# NOTE(review): geom_point() and geom_jitter() both draw the same data —
# the points are plotted twice.
ggplot(student_data, aes(x = MA, y = Pre_Time_Correct, color = MP_level)) +
geom_point() +
geom_smooth(method = "lm", level = 0.95, alpha = 0.3) +
geom_jitter() +
labs(x = "MA",
y = "Pre_Time_Correct",
color = "MP_level") +
theme_minimal() +
theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 17 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning: Removed 17 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Removed 17 rows containing missing values or values outside the scale range
## (`geom_point()`).
# Modeling with lm()
# OLS of log total response time on standardized MA x Pre_MP (all responses)
model <- lm(log(Pre_Time) ~ scale(MA) * scale(Pre_MP), data = student_data)
summary(model)
##
## Call:
## lm(formula = log(Pre_Time) ~ scale(MA) * scale(Pre_MP), data = student_data)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.01199 -0.32698 -0.02159 0.27827 0.99405
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3.59175 0.04487 80.050 <2e-16 ***
## scale(MA) 0.07890 0.04532 1.741 0.0845 .
## scale(Pre_MP) -0.07381 0.04684 -1.576 0.1178
## scale(MA):scale(Pre_MP) 0.11205 0.04454 2.516 0.0133 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.4401 on 113 degrees of freedom
## Multiple R-squared: 0.09646, Adjusted R-squared: 0.07247
## F-statistic: 4.021 on 3 and 113 DF, p-value: 0.009254
# Residual diagnostics for the all-responses model, then the same model
# refit with HC2 robust standard errors for comparison
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.98355, p-value = 0.1635
bptest(model) # if p > 0.05 -- no strong evidence of heteroscedasticity (OLS is okay)
##
## studentized Breusch-Pagan test
##
## data: model
## BP = 1.3511, df = 3, p-value = 0.717
# Modeling with lm_robust()
model_robust <- lm_robust(log(Pre_Time) ~ scale(MA) * scale(Pre_MP), data = student_data)
summary(model_robust)
##
## Call:
## lm_robust(formula = log(Pre_Time) ~ scale(MA) * scale(Pre_MP),
## data = student_data)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|) CI Lower
## (Intercept) 3.59175 0.04492 79.956 2.661e-101 3.50275
## scale(MA) 0.07890 0.04943 1.596 1.133e-01 -0.01903
## scale(Pre_MP) -0.07381 0.05137 -1.437 1.535e-01 -0.17559
## scale(MA):scale(Pre_MP) 0.11205 0.04855 2.308 2.281e-02 0.01588
## CI Upper DF
## (Intercept) 3.68075 113
## scale(MA) 0.17683 113
## scale(Pre_MP) 0.02796 113
## scale(MA):scale(Pre_MP) 0.20823 113
##
## Multiple R-squared: 0.09646 , Adjusted R-squared: 0.07247
## F-statistic: 3.749 on 3 and 113 DF, p-value: 0.01305
# Plotting by continuous
# Total response time vs. MA, colored by performance level.
# NOTE(review): geom_point() and geom_jitter() both draw the same data —
# the points are plotted twice.
ggplot(student_data, aes(x = MA, y = Pre_Time , color = MP_level)) +
geom_point() +
geom_smooth(method = "lm", level = 0.95, alpha = 0.3) +
geom_jitter() +
#facet_wrap(~Image) +
labs(x = "MA",
y = "Pre_Time",
color = "MP_level") +
theme_minimal() +
theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
# Pretest trials only: Image codes starting with "P" mark pretest items
# (the "pretes" spelling is kept -- the name is reused downstream)
pretes_problem_level <- problem_level_math_student_data %>% filter (grepl("^P", Image))
# Keep correct responses only.
# NOTE(review): Correct is compared to the string "TRUE"; this works for
# logical or character codings -- verify the column's actual type.
pretes_problem_level_correct <- pretes_problem_level %>% filter (Correct == "TRUE")
# Modeling with HLM: log response time on MA x pretest performance, with
# crossed random intercepts for problem (Image), presentation order, and student
model <- lmer(log(trialElapsedTime) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretes_problem_level_correct)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(trialElapsedTime) ~ scale(MA) * scale(Pre_MP) + (1 | Image) +
## (1 | problem_Order) + (1 | pid)
## Data: pretes_problem_level_correct
##
## REML criterion at convergence: 882.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.5271 -0.6358 -0.0815 0.5845 3.2108
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 0.12746 0.3570
## problem_Order (Intercept) 0.02120 0.1456
## Image (Intercept) 0.07219 0.2687
## Residual 0.11665 0.3415
## Number of obs: 833, groups: pid, 100; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 3.43087 0.09698 35.379
## scale(MA) 0.14213 0.04166 3.412
## scale(Pre_MP) -0.05898 0.03393 -1.738
## scale(MA):scale(Pre_MP) 0.05076 0.02648 1.917
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.025
## scal(Pr_MP) 0.077 0.143
## s(MA):(P_MP 0.066 0.390 -0.349
# Residual normality check for the HLM above (a small p-value indicates
# the residuals deviate from normality)
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.98866, p-value = 4.761e-06
# Scatter of trial time against pretest performance, colored by MA level and
# faceted by problem image, with per-group linear fits.
# Fix: alpha was previously mapped INSIDE aes() (aes(..., alpha = 0.7)),
# which treats the constant as a data variable and adds a spurious alpha
# legend; it is now set as a fixed layer aesthetic instead.
# NOTE(review): geom_point() and geom_jitter() both draw the raw points
# (once unjittered, once jittered) -- confirm the double layer is intended.
ggplot(pretes_problem_level_correct, aes(x = Pre_MP, y = trialElapsedTime, color = MA_level)) +
geom_point(alpha = 0.7) +
geom_smooth(method = "lm", level = 0.95) +
geom_jitter(alpha = 0.7) +
facet_wrap(~Image) +
labs(x = "Pre_MP",
y = "Time",
color = "MA_level") +
theme_minimal() +
theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
#### Problem-level: time across all responses
# Pretest items only (Image codes starting with "P"); unlike the previous
# model this keeps both correct and incorrect responses
pretes_problem_level <- problem_level_math_student_data %>% filter (grepl("^P", Image))
# Modeling with HLM: log response time on MA x pretest performance, with
# crossed random intercepts for problem (Image), presentation order, and student
model <- lmer(log(trialElapsedTime) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretes_problem_level)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(trialElapsedTime) ~ scale(MA) * scale(Pre_MP) + (1 | Image) +
## (1 | problem_Order) + (1 | pid)
## Data: pretes_problem_level
##
## REML criterion at convergence: 1918.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.4204 -0.6590 -0.0488 0.5689 3.7526
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 0.16639 0.4079
## problem_Order (Intercept) 0.02383 0.1544
## Image (Intercept) 0.04613 0.2148
## Residual 0.17623 0.4198
## Number of obs: 1399, groups: pid, 117; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 3.47548 0.08781 39.579
## scale(MA) 0.07361 0.04362 1.687
## scale(Pre_MP) -0.05522 0.04509 -1.225
## scale(MA):scale(Pre_MP) 0.11075 0.04263 2.598
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.013
## scal(Pr_MP) -0.054 0.394
## s(MA):(P_MP 0.208 0.069 -0.263
# Residual normality check for the all-response HLM (a small p-value
# indicates the residuals deviate from normality)
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.9911, p-value = 1.722e-07
| AOI | Type | NAs | min | mean | sd | median | max | zeros |
|---|---|---|---|---|---|---|---|---|
| Total_Hits_Outside_of_Screen_prop_Correct_Pretest | Total_ | 0.15 | 0.03 | 0.24 | 0.10 | 0.24 | 0.43 | 0 |
| Total_Problem_AOIs_prop_Correct_Pretest | Total_ | 0.15 | 0.07 | 0.15 | 0.04 | 0.13 | 0.25 | 0 |
| Total_Response_AOIs_prop_Correct_Pretest | Total_ | 0.15 | 0.00 | 0.02 | 0.01 | 0.02 | 0.06 | 0 |
| Total_Hits_Timer_prop_Correct_Pretest | Total_ | 0.15 | 0.00 | 0.01 | 0.01 | 0.00 | 0.03 | 0 |
| New_Problem_AOIs_Correct_Pretest | New_ | 0.15 | 20.50 | 93.88 | 62.49 | 78.81 | 349.83 | 0 |
| New_Response_AOIs_Correct_Pretest | New_ | 0.15 | 0.18 | 16.33 | 11.75 | 13.56 | 62.67 | 0 |
| Numerator_Denominator_Transitions_Correct_Pretest | New_ | 0.15 | 3.80 | 23.63 | 16.39 | 18.79 | 94.17 | 0 |
## Warning: Removed 56 rows containing non-finite outside the scale range
## (`stat_bin()`).
## Warning: Removed 42 rows containing non-finite outside the scale range
## (`stat_bin()`).
## Warning: Removed 56 rows containing non-finite outside the scale range
## (`stat_bin()`).
## Warning: Removed 42 rows containing non-finite outside the scale range
## (`stat_bin()`).
# Combine all data: attach student-level covariates to the per-student AOI
# aggregates; suffixes c("", "_drop") keeps the AOI-side name for any
# duplicated column and tags the student_data copy for removal
AOI_student_data_pretest_correct <- pretest_mean_AOI_correct %>%
merge(student_data, by = "pid", all.x = TRUE, suffixes = c("", "_drop"))
# Discard the duplicated "_drop" columns introduced by the merge
AOI_student_data_pretest_correct <- AOI_student_data_pretest_correct[, !grepl("_drop$", names(AOI_student_data_pretest_correct))]
# Cluster-group sizes for the full pretest sample
print ("all participants")
## [1] "all participants"
AOI_student_data_pretest %>%
group_by (pre_cluster_groups) %>%
summarise(n = n())
## # A tibble: 3 × 2
## pre_cluster_groups n
## <chr> <int>
## 1 hMP_lMA 29
## 2 hMP_mMA 39
## 3 lMP_mMA 28
# Cluster-group sizes after restricting to correct responses (note the
# smaller lMP_mMA group relative to the full sample above)
print ("participants with correct responses")
## [1] "participants with correct responses"
AOI_student_data_pretest_correct %>%
group_by (pre_cluster_groups) %>%
summarise(n = n())
## # A tibble: 3 × 2
## pre_cluster_groups n
## <chr> <int>
## 1 hMP_lMA 29
## 2 hMP_mMA 39
## 3 lMP_mMA 14
# Eye-tracking outcome columns (correct pretest trials) to compare across
# the MP/MA cluster groups with Kruskal-Wallis + Dunn tests below
aoi_group_columns_correct_pretest <- c("Total_Problem_AOIs_prop_Correct_Pretest",
"Total_Response_AOIs_prop_Correct_Pretest",
"Total_Hits_Timer_prop_Correct_Pretest",
"New_Problem_AOIs_Correct_Pretest", "New_Response_AOIs_Correct_Pretest",
"Numerator_Denominator_Transitions_Correct_Pretest",
"Total_Hits_Outside_of_Screen_prop_Correct_Pretest")
# Run a Kruskal-Wallis test plus Dunn's pairwise post-hoc comparisons
# (Bonferroni-corrected) for a single outcome column, and return the
# pairwise results as a tidy data frame.
#
# outcome_var: name (string) of the outcome column in `data`
# data:        data frame holding outcome and grouping columns
# group_var:   name (string) of the grouping column
# Returns a data frame with columns outcome, comparison, Z, P, P.adjusted.
# Side effect: dunn.test() prints the KW test and comparison table.
run_dunn <- function(outcome_var, data, group_var) {
  res <- dunn.test(data[[outcome_var]],
                   data[[group_var]],
                   method = "bonferroni",
                   kw = TRUE,
                   table = TRUE)
  data.frame(
    outcome = outcome_var,
    comparison = paste(res$comparisons),
    Z = res$Z,
    P = res$P,
    P.adjusted = res$P.adjusted
  )
}
# Run all tests and combine results: one Dunn test per AOI outcome, stacked
# row-wise into a single tidy data frame (dunn.test prints as it goes)
all_dunn_tests <- map_df(aoi_group_columns_correct_pretest, ~run_dunn(.x, data = AOI_student_data_pretest_correct, group_var = "pre_cluster_groups"))
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 0.5953, df = 2, p-value = 0.74
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -0.410779
## | 1.0000
## |
## lMP_mMA | -0.766522 -0.477382
## | 0.6650 0.9496
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 0.1285, df = 2, p-value = 0.94
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | 0.097206
## | 1.0000
## |
## lMP_mMA | -0.269172 -0.357666
## | 1.0000 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 1.8898, df = 2, p-value = 0.39
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -0.720567
## | 0.7068
## |
## lMP_mMA | -1.367155 -0.860967
## | 0.2574 0.5839
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 2.8089, df = 2, p-value = 0.25
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -1.497922
## | 0.2012
## |
## lMP_mMA | -1.325213 -0.205369
## | 0.2777 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 1.7081, df = 2, p-value = 0.43
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -1.022986
## | 0.4595
## |
## lMP_mMA | -1.175582 -0.422852
## | 0.3596 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 3.3784, df = 2, p-value = 0.18
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -0.953680
## | 0.5104
## |
## lMP_mMA | -1.829100 -1.160029
## | 0.1011 0.3691
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 0.9161, df = 2, p-value = 0.63
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | 0.559768
## | 0.8635
## |
## lMP_mMA | -0.458260 -0.919221
## | 0.9701 0.5370
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# View results in nice format: sort within outcome by adjusted p-value,
# round the test statistics, and render as a markdown table
all_dunn_tests %>%
arrange(outcome, P.adjusted) %>%
mutate(across(c(Z, P, P.adjusted), ~round(., 4))) %>%
knitr::kable()
| outcome | comparison | Z | P | P.adjusted |
|---|---|---|---|---|
| New_Problem_AOIs_Correct_Pretest | hMP_lMA - hMP_mMA | -1.4979 | 0.0671 | 0.2012 |
| New_Problem_AOIs_Correct_Pretest | hMP_lMA - lMP_mMA | -1.3252 | 0.0926 | 0.2777 |
| New_Problem_AOIs_Correct_Pretest | hMP_mMA - lMP_mMA | -0.2054 | 0.4186 | 1.0000 |
| New_Response_AOIs_Correct_Pretest | hMP_lMA - lMP_mMA | -1.1756 | 0.1199 | 0.3596 |
| New_Response_AOIs_Correct_Pretest | hMP_lMA - hMP_mMA | -1.0230 | 0.1532 | 0.4595 |
| New_Response_AOIs_Correct_Pretest | hMP_mMA - lMP_mMA | -0.4229 | 0.3362 | 1.0000 |
| Numerator_Denominator_Transitions_Correct_Pretest | hMP_lMA - lMP_mMA | -1.8291 | 0.0337 | 0.1011 |
| Numerator_Denominator_Transitions_Correct_Pretest | hMP_mMA - lMP_mMA | -1.1600 | 0.1230 | 0.3691 |
| Numerator_Denominator_Transitions_Correct_Pretest | hMP_lMA - hMP_mMA | -0.9537 | 0.1701 | 0.5104 |
| Total_Hits_Outside_of_Screen_prop_Correct_Pretest | hMP_mMA - lMP_mMA | -0.9192 | 0.1790 | 0.5370 |
| Total_Hits_Outside_of_Screen_prop_Correct_Pretest | hMP_lMA - hMP_mMA | 0.5598 | 0.2878 | 0.8635 |
| Total_Hits_Outside_of_Screen_prop_Correct_Pretest | hMP_lMA - lMP_mMA | -0.4583 | 0.3234 | 0.9701 |
| Total_Hits_Timer_prop_Correct_Pretest | hMP_lMA - lMP_mMA | -1.3672 | 0.0858 | 0.2574 |
| Total_Hits_Timer_prop_Correct_Pretest | hMP_mMA - lMP_mMA | -0.8610 | 0.1946 | 0.5839 |
| Total_Hits_Timer_prop_Correct_Pretest | hMP_lMA - hMP_mMA | -0.7206 | 0.2356 | 0.7068 |
| Total_Problem_AOIs_prop_Correct_Pretest | hMP_lMA - lMP_mMA | -0.7665 | 0.2217 | 0.6650 |
| Total_Problem_AOIs_prop_Correct_Pretest | hMP_mMA - lMP_mMA | -0.4774 | 0.3165 | 0.9496 |
| Total_Problem_AOIs_prop_Correct_Pretest | hMP_lMA - hMP_mMA | -0.4108 | 0.3406 | 1.0000 |
| Total_Response_AOIs_prop_Correct_Pretest | hMP_lMA - hMP_mMA | 0.0972 | 0.4613 | 1.0000 |
| Total_Response_AOIs_prop_Correct_Pretest | hMP_lMA - lMP_mMA | -0.2692 | 0.3939 | 1.0000 |
| Total_Response_AOIs_prop_Correct_Pretest | hMP_mMA - lMP_mMA | -0.3577 | 0.3603 | 1.0000 |
# Boxplot (with jittered raw points) of total problem-AOI fixation counts
# on correct pretest trials, by cluster group
ggplot(AOI_student_data_pretest_correct,
aes(x = pre_cluster_groups, y = Total_Problem_AOIs_Correct_Pretest, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Total_Problem_AOIs_Correct_Pretest",
title = "Total_Problem_AOIs_Correct_Pretest Comparison") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Boxplot of numerator-denominator gaze transitions (correct trials), by group
ggplot(AOI_student_data_pretest_correct,
aes(x = pre_cluster_groups, y = Numerator_Denominator_Transitions_Correct_Pretest, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Numerator_Denominator_Transitions_Correct_Pretest",
title = "Numerator_Denominator_Transitions_Correct_Pretest Comparison") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Boxplot of the proportion of timer-AOI hits (correct trials), by group
ggplot(AOI_student_data_pretest_correct,
aes(x = pre_cluster_groups, y = Total_Hits_Timer_prop_Correct_Pretest, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Time_Prop_Correct_Pretest",
title = "Time_Prop_Correct_Pretest Comparison") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Measures to show in the combined faceted plot (the outside-of-screen
# proportion is tested above but excluded from this common plot)
aoi_group_columns_correct_pretest_common_plot <- c("Total_Problem_AOIs_prop_Correct_Pretest",
"Total_Response_AOIs_prop_Correct_Pretest",
"Total_Hits_Timer_prop_Correct_Pretest",
"New_Problem_AOIs_Correct_Pretest", "New_Response_AOIs_Correct_Pretest",
"Numerator_Denominator_Transitions_Correct_Pretest")
# Create visualization
# Reshape to long format: one row per student x measure, for faceting
long_AOI_student_data_pretest_correct <- AOI_student_data_pretest_correct %>%
# filter(pre_cluster_groups %in% c("hMP_lMA", "hMP_mMA")) %>%
pivot_longer(cols = all_of(aoi_group_columns_correct_pretest_common_plot),
names_to = "Variable",
values_to = "Value")
# One boxplot panel per measure (free y-scales), jittered raw points overlaid
ggplot(long_AOI_student_data_pretest_correct, aes(x = pre_cluster_groups, y = Value, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
facet_wrap(~Variable, scales = "free_y") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 45, hjust = 1),
legend.position = "none") +
labs(x = "Group",
y = "Value",
title = "Comparison of Eye-tracking Measures between Groups",
subtitle = "Aggregated by student") +
scale_fill_brewer(palette = "Set2")
# Eye-tracking outcome columns for ALL pretest responses (correct and
# incorrect), mirroring the correct-only column set above
aoi_group_columns_pretest <- c("Total_Problem_AOIs_prop_Pretest",
"Total_Response_AOIs_prop_Pretest",
"Total_Hits_Timer_prop_Pretest",
"New_Problem_AOIs_Pretest", "New_Response_AOIs_Pretest",
"Numerator_Denominator_Transitions_Pretest",
"Total_Hits_Outside_of_Screen_prop_Pretest")
# Run a Kruskal-Wallis test plus Dunn's pairwise post-hoc comparisons
# (Bonferroni-corrected) for a single outcome column, returned as a tidy
# data frame (outcome, comparison, Z, P, P.adjusted).
# Identical redefinition of the helper used for the correct-only analyses.
# Side effect: dunn.test() prints the KW test and comparison table.
run_dunn <- function(outcome_var, data, group_var) {
  res <- dunn.test(data[[outcome_var]],
                   data[[group_var]],
                   method = "bonferroni",
                   kw = TRUE,
                   table = TRUE)
  data.frame(
    outcome = outcome_var,
    comparison = paste(res$comparisons),
    Z = res$Z,
    P = res$P,
    P.adjusted = res$P.adjusted
  )
}
# Run all tests and combine results over the all-response outcome columns
# (dunn.test prints the KW and pairwise tables as it goes)
all_dunn_tests <- map_df(aoi_group_columns_pretest, ~run_dunn(.x, data = AOI_student_data_pretest, group_var = "pre_cluster_groups"))
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 0.4825, df = 2, p-value = 0.79
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -0.679324
## | 0.7454
## |
## lMP_mMA | -0.237609 0.418318
## | 1.0000 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 2.2002, df = 2, p-value = 0.33
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -0.219019
## | 1.0000
## |
## lMP_mMA | -1.359579 -1.237435
## | 0.2609 0.3239
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 0.7101, df = 2, p-value = 0.7
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -0.807215
## | 0.6293
## |
## lMP_mMA | -0.633402 0.121567
## | 0.7897 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 3.5136, df = 2, p-value = 0.17
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -1.514629
## | 0.1948
## |
## lMP_mMA | -1.739855 -0.361648
## | 0.1228 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 3.9294, df = 2, p-value = 0.14
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -1.175622
## | 0.3596
## |
## lMP_mMA | -1.976643 -0.950512
## | 0.0721 0.5128
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 3.3964, df = 2, p-value = 0.18
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -1.232319
## | 0.3267
## |
## lMP_mMA | -1.815455 -0.721975
## | 0.1042 0.7055
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 0.941, df = 2, p-value = 0.62
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | 0.893037
## | 0.5578
## |
## lMP_mMA | 0.795090 -0.033576
## | 0.6398 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# View results in nice format: sort within outcome by adjusted p-value,
# round the test statistics, and render as a markdown table
all_dunn_tests %>%
arrange(outcome, P.adjusted) %>%
mutate(across(c(Z, P, P.adjusted), ~round(., 4))) %>%
knitr::kable()
| outcome | comparison | Z | P | P.adjusted |
|---|---|---|---|---|
| New_Problem_AOIs_Pretest | hMP_lMA - lMP_mMA | -1.7399 | 0.0409 | 0.1228 |
| New_Problem_AOIs_Pretest | hMP_lMA - hMP_mMA | -1.5146 | 0.0649 | 0.1948 |
| New_Problem_AOIs_Pretest | hMP_mMA - lMP_mMA | -0.3616 | 0.3588 | 1.0000 |
| New_Response_AOIs_Pretest | hMP_lMA - lMP_mMA | -1.9766 | 0.0240 | 0.0721 |
| New_Response_AOIs_Pretest | hMP_lMA - hMP_mMA | -1.1756 | 0.1199 | 0.3596 |
| New_Response_AOIs_Pretest | hMP_mMA - lMP_mMA | -0.9505 | 0.1709 | 0.5128 |
| Numerator_Denominator_Transitions_Pretest | hMP_lMA - lMP_mMA | -1.8155 | 0.0347 | 0.1042 |
| Numerator_Denominator_Transitions_Pretest | hMP_lMA - hMP_mMA | -1.2323 | 0.1089 | 0.3267 |
| Numerator_Denominator_Transitions_Pretest | hMP_mMA - lMP_mMA | -0.7220 | 0.2352 | 0.7055 |
| Total_Hits_Outside_of_Screen_prop_Pretest | hMP_lMA - hMP_mMA | 0.8930 | 0.1859 | 0.5578 |
| Total_Hits_Outside_of_Screen_prop_Pretest | hMP_lMA - lMP_mMA | 0.7951 | 0.2133 | 0.6398 |
| Total_Hits_Outside_of_Screen_prop_Pretest | hMP_mMA - lMP_mMA | -0.0336 | 0.4866 | 1.0000 |
| Total_Hits_Timer_prop_Pretest | hMP_lMA - hMP_mMA | -0.8072 | 0.2098 | 0.6293 |
| Total_Hits_Timer_prop_Pretest | hMP_lMA - lMP_mMA | -0.6334 | 0.2632 | 0.7897 |
| Total_Hits_Timer_prop_Pretest | hMP_mMA - lMP_mMA | 0.1216 | 0.4516 | 1.0000 |
| Total_Problem_AOIs_prop_Pretest | hMP_lMA - hMP_mMA | -0.6793 | 0.2485 | 0.7454 |
| Total_Problem_AOIs_prop_Pretest | hMP_lMA - lMP_mMA | -0.2376 | 0.4061 | 1.0000 |
| Total_Problem_AOIs_prop_Pretest | hMP_mMA - lMP_mMA | 0.4183 | 0.3379 | 1.0000 |
| Total_Response_AOIs_prop_Pretest | hMP_lMA - lMP_mMA | -1.3596 | 0.0870 | 0.2609 |
| Total_Response_AOIs_prop_Pretest | hMP_mMA - lMP_mMA | -1.2374 | 0.1080 | 0.3239 |
| Total_Response_AOIs_prop_Pretest | hMP_lMA - hMP_mMA | -0.2190 | 0.4133 | 1.0000 |
# Boxplot of numerator-denominator gaze transitions across ALL pretest
# responses, by group (the old comment said "_Correct_" -- this plot uses
# the all-response variable)
ggplot(AOI_student_data_pretest,
aes(x = pre_cluster_groups, y = Numerator_Denominator_Transitions_Pretest, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Numerator_Denominator_Transitions_Pretest",
title = "Numerator_Denominator_Transitions_Pretest Comparison") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Boxplot of new problem-AOI fixation counts (all responses), by group
ggplot(AOI_student_data_pretest,
aes(x = pre_cluster_groups, y = New_Problem_AOIs_Pretest, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "New_Problem_AOIs_Pretest",
title = "New_Problem_AOIs_Pretest Comparison") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Boxplot of the proportion of timer-AOI hits across ALL pretest responses,
# by group (old comment referenced the "_Correct_" variant)
ggplot(AOI_student_data_pretest,
aes(x = pre_cluster_groups, y = Total_Hits_Timer_prop_Pretest, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Time_Prop_Pretest",
title = "Time_Prop_Pretest Comparison") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Measures to show in the combined faceted plot (outside-of-screen
# proportion is tested above but excluded here)
aoi_group_columns_pretest_common_plot <- c("Total_Problem_AOIs_prop_Pretest",
"Total_Response_AOIs_prop_Pretest",
"Total_Hits_Timer_prop_Pretest",
"New_Problem_AOIs_Pretest", "New_Response_AOIs_Pretest",
"Numerator_Denominator_Transitions_Pretest")
# Create visualization
# Reshape to long format: one row per student x measure, for faceting
long_AOI_student_data_pretest <- AOI_student_data_pretest %>%
# filter(pre_cluster_groups %in% c("hMP_lMA", "hMP_mMA")) %>%
pivot_longer(cols = all_of(aoi_group_columns_pretest_common_plot),
names_to = "Variable",
values_to = "Value")
# One boxplot panel per measure (free y-scales), jittered raw points overlaid
ggplot(long_AOI_student_data_pretest, aes(x = pre_cluster_groups, y = Value, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
facet_wrap(~Variable, scales = "free_y") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 45, hjust = 1),
legend.position = "none") +
labs(x = "Group",
y = "Value",
title = "Comparison of Eye-tracking Measures between Groups",
subtitle = "Aggregated by student") +
scale_fill_brewer(palette = "Set2")
# Fit one Gaussian OLS model per eye-tracking DV (log-transformed) on
# correct pretest trials, with MA x pretest performance as predictors, and
# collect tidy coefficient summaries into `model_summaries`.
# Fixes vs the original loop:
#  - bptest() was computed but silently discarded (expressions are not
#    auto-printed inside a for loop); its result is now reported.
#  - `sum_model$coefficient` relied on partial matching; full name used.
#  - `formula` no longer masks stats::formula(); rows are collected in a
#    preallocated list instead of growing a data.frame with rbind().
summary_rows <- vector("list", length(aoi_group_columns_correct_pretest))
for (i in seq_along(aoi_group_columns_correct_pretest)) {
  dv <- aoi_group_columns_correct_pretest[[i]]
  # Build and fit the per-DV model
  model_formula <- as.formula(paste0("log(", dv, ") ~ scale(MA) * scale(Pre_MP)"))
  model <- lm(model_formula, data = AOI_student_data_pretest_correct)
  # Shapiro-Wilk test for residual normality
  shapiro_test <- shapiro.test(resid(model))
  print(paste("Shapiro-Wilk test for", dv, ": W =", round(shapiro_test$statistic, 3),
              "p =", format.pval(shapiro_test$p.value, digits = 3)))
  # Breusch-Pagan test; p > 0.05 -- no strong evidence of heteroscedasticity (OLS is okay)
  bp_test <- bptest(model)
  print(paste("Breusch-Pagan test for", dv, ": BP =", round(bp_test$statistic, 3),
              "p =", format.pval(bp_test$p.value, digits = 3)))
  # Extract fixed effects and standardized effect sizes
  sum_model <- summary(model)
  fixed_effects <- sum_model$coefficients
  beta <- standardize_parameters(model)
  summary_rows[[i]] <- data.frame(
    DV = dv,
    Predictor = rownames(fixed_effects),
    Estimate = round(fixed_effects[, "Estimate"], 3),
    SE = round(fixed_effects[, "Std. Error"], 3),
    t_value = round(fixed_effects[, "t value"], 3),
    p_value = fixed_effects[, "Pr(>|t|)"],
    Std_Beta = round(beta$Std_Coefficient, 3)
  )
}
# Combine the per-DV rows into the summary data frame used below
model_summaries <- do.call(rbind, summary_rows)
## [1] "Shapiro-Wilk test for Total_Problem_AOIs_prop_Correct_Pretest : W = 0.985 p = 0.466"
## [1] "Shapiro-Wilk test for Total_Response_AOIs_prop_Correct_Pretest : W = 0.846 p = 9.22e-08"
## [1] "Shapiro-Wilk test for Total_Hits_Timer_prop_Correct_Pretest : W = 0.966 p = 0.0266"
## [1] "Shapiro-Wilk test for New_Problem_AOIs_Correct_Pretest : W = 0.994 p = 0.981"
## [1] "Shapiro-Wilk test for New_Response_AOIs_Correct_Pretest : W = 0.912 p = 3.28e-05"
## [1] "Shapiro-Wilk test for Numerator_Denominator_Transitions_Correct_Pretest : W = 0.993 p = 0.944"
## [1] "Shapiro-Wilk test for Total_Hits_Outside_of_Screen_prop_Correct_Pretest : W = 0.904 p = 1.43e-05"
# Format results for display.
# Fix: `Significant` is now computed from the NUMERIC p-values before they
# are formatted. The original applied as.numeric() to format.pval() output,
# which returns NA for values printed as "<2e-16", so the most significant
# rows were flagged NA in the rendered table.
model_summaries <- model_summaries %>%
  mutate(
    Significant = ifelse(p_value < 0.05, "*", ""),
    p_value = format.pval(p_value, digits = 3)
  )
# Display formatted table
kable(model_summaries,
      caption = "Summary of Gaussian Models with log()",
      digits = 3) %>%
  kable_styling(bootstrap_options = c("striped", "hover", "condensed"))
| DV | Predictor | Estimate | SE | t_value | p_value | Std_Beta | Significant | |
|---|---|---|---|---|---|---|---|---|
| (Intercept) | Total_Problem_AOIs_prop_Correct_Pretest | (Intercept) | -1.977 | 0.038 | -51.725 | <2e-16 | 0.954 | NA |
| scale(MA) | Total_Problem_AOIs_prop_Correct_Pretest | scale(MA) | 0.022 | 0.040 | 0.550 | 0.5840 | 0.022 | |
| scale(Pre_MP) | Total_Problem_AOIs_prop_Correct_Pretest | scale(Pre_MP) | -0.008 | 0.043 | -0.174 | 0.8622 | -0.012 | |
| scale(MA):scale(Pre_MP) | Total_Problem_AOIs_prop_Correct_Pretest | scale(MA):scale(Pre_MP) | -0.010 | 0.035 | -0.288 | 0.7741 | -0.014 | |
| (Intercept)1 | Total_Response_AOIs_prop_Correct_Pretest | (Intercept) | -4.014 | 0.083 | -48.443 | <2e-16 | 0.995 | NA |
| scale(MA)1 | Total_Response_AOIs_prop_Correct_Pretest | scale(MA) | 0.035 | 0.086 | 0.402 | 0.6889 | 0.024 | |
| scale(Pre_MP)1 | Total_Response_AOIs_prop_Correct_Pretest | scale(Pre_MP) | 0.006 | 0.093 | 0.068 | 0.9458 | 0.019 | |
| scale(MA):scale(Pre_MP)1 | Total_Response_AOIs_prop_Correct_Pretest | scale(MA):scale(Pre_MP) | -0.029 | 0.075 | -0.387 | 0.6997 | -0.020 | |
| (Intercept)2 | Total_Hits_Timer_prop_Correct_Pretest | (Intercept) | -5.440 | 0.124 | -43.749 | <2e-16 | 0.649 | NA |
| scale(MA)2 | Total_Hits_Timer_prop_Correct_Pretest | scale(MA) | -0.011 | 0.129 | -0.086 | 0.9315 | 0.019 | |
| scale(Pre_MP)2 | Total_Hits_Timer_prop_Correct_Pretest | scale(Pre_MP) | -0.169 | 0.140 | -1.204 | 0.2324 | -0.081 | |
| scale(MA):scale(Pre_MP)2 | Total_Hits_Timer_prop_Correct_Pretest | scale(MA):scale(Pre_MP) | -0.014 | 0.113 | -0.123 | 0.9021 | 0.028 | |
| (Intercept)3 | New_Problem_AOIs_Correct_Pretest | (Intercept) | 4.386 | 0.072 | 61.190 | <2e-16 | 0.711 | NA |
| scale(MA)3 | New_Problem_AOIs_Correct_Pretest | scale(MA) | 0.117 | 0.074 | 1.568 | 0.1208 | 0.086 | |
| scale(Pre_MP)3 | New_Problem_AOIs_Correct_Pretest | scale(Pre_MP) | -0.100 | 0.081 | -1.242 | 0.2181 | -0.050 | |
| scale(MA):scale(Pre_MP)3 | New_Problem_AOIs_Correct_Pretest | scale(MA):scale(Pre_MP) | 0.063 | 0.065 | 0.968 | 0.3361 | 0.041 | |
| (Intercept)4 | New_Response_AOIs_Correct_Pretest | (Intercept) | 2.523 | 0.106 | 23.739 | <2e-16 | 0.796 | NA |
| scale(MA)4 | New_Response_AOIs_Correct_Pretest | scale(MA) | 0.148 | 0.110 | 1.345 | 0.1824 | 0.074 | |
| scale(Pre_MP)4 | New_Response_AOIs_Correct_Pretest | scale(Pre_MP) | -0.063 | 0.120 | -0.530 | 0.5976 | -0.033 | |
| scale(MA):scale(Pre_MP)4 | New_Response_AOIs_Correct_Pretest | scale(MA):scale(Pre_MP) | 0.049 | 0.097 | 0.506 | 0.6140 | 0.020 | |
| (Intercept)5 | Numerator_Denominator_Transitions_Correct_Pretest | (Intercept) | 2.979 | 0.076 | 39.148 | <2e-16 | 0.723 | NA |
| scale(MA)5 | Numerator_Denominator_Transitions_Correct_Pretest | scale(MA) | 0.079 | 0.079 | 0.996 | 0.3223 | 0.063 | |
| scale(Pre_MP)5 | Numerator_Denominator_Transitions_Correct_Pretest | scale(Pre_MP) | -0.165 | 0.086 | -1.924 | 0.0580 | -0.081 | |
| scale(MA):scale(Pre_MP)5 | Numerator_Denominator_Transitions_Correct_Pretest | scale(MA):scale(Pre_MP) | 0.047 | 0.069 | 0.678 | 0.5001 | 0.025 | |
| (Intercept)6 | Total_Hits_Outside_of_Screen_prop_Correct_Pretest | (Intercept) | -1.558 | 0.065 | -24.022 | <2e-16 | 1.087 | NA |
| scale(MA)6 | Total_Hits_Outside_of_Screen_prop_Correct_Pretest | scale(MA) | -0.116 | 0.067 | -1.729 | 0.0878 | -0.078 | |
| scale(Pre_MP)6 | Total_Hits_Outside_of_Screen_prop_Correct_Pretest | scale(Pre_MP) | -0.062 | 0.073 | -0.843 | 0.4017 | -0.046 | |
| scale(MA):scale(Pre_MP)6 | Total_Hits_Outside_of_Screen_prop_Correct_Pretest | scale(MA):scale(Pre_MP) | -0.016 | 0.059 | -0.265 | 0.7915 | -0.007 |
# Scatter of numerator-denominator transitions against pretest performance,
# colored by MA level, with per-group linear fits.
# NOTE(review): geom_point() and geom_jitter() both draw the raw points
# (once unjittered, once jittered) -- confirm the double layer is intended.
ggplot(AOI_student_data_pretest_correct,
aes(x = Pre_MP,
y = Numerator_Denominator_Transitions_Correct_Pretest ,
color = MA_level)) +
geom_point() +
geom_smooth(method = "lm", level = 0.95, alpha = 0.3) +
geom_jitter() +
labs(x = "Pre_MP",
y = "Numerator_Denominator_Transitions_Correct_Pretest",
color = "MA_level") +
theme_minimal() +
theme(legend.position = "bottom")
# Same plot for new problem-AOI fixation counts (correct trials).
# NOTE(review): duplicate point/jitter layers here as well.
ggplot(AOI_student_data_pretest_correct,
aes(x = Pre_MP,
y = New_Problem_AOIs_Correct_Pretest ,
color = MA_level)) +
geom_point() +
geom_smooth(method = "lm", level = 0.95, alpha = 0.3) +
geom_jitter(alpha = 0.5) +
labs(x = "Pre_MP",
y = "New_Problem_AOIs_Correct_Pretest",
color = "MA_level") +
theme_minimal() +
theme(legend.position = "bottom")
# DVs whose OLS residuals failed the normality check above; these are
# re-modeled with heteroscedasticity-robust regression (lm_robust, HC2).
aoi_cols_non_normal <- c(
  "Total_Response_AOIs_prop_Correct_Pretest",
  "Total_Hits_Timer_prop_Correct_Pretest",
  "New_Response_AOIs_Correct_Pretest")
# Accumulator for the per-model tidy coefficient rows
model_summaries <- data.frame()
for (dv in aoi_cols_non_normal) {
  # Robust fit of log(DV) on MA x pretest performance
  robust_formula <- as.formula(paste0("log(", dv, ") ~ scale(MA) * scale(Pre_MP)"))
  robust_fit <- lm_robust(robust_formula, data = AOI_student_data_pretest_correct)
  coef_table <- summary(robust_fit)$coefficients
  # Standardized effect sizes for this model
  std_beta <- standardize_parameters(robust_fit)
  row_df <- data.frame(
    DV = dv,
    Predictor = rownames(coef_table),
    Estimate = round(coef_table[, "Estimate"], 3),
    SE = round(coef_table[, "Std. Error"], 3),
    z_value = round(coef_table[, "t value"], 3),
    p_value = coef_table[, "Pr(>|t|)"],
    Std_Beta = round(std_beta$Std_Coefficient, 3)
  )
  model_summaries <- rbind(model_summaries, row_df)
}
# Format results for display.
# Fix: `Significant` is computed from the NUMERIC p-values before they are
# formatted; the original ran as.numeric() on format.pval() output, which
# yields NA for values printed as "<2e-16" (visible as NA flags in the
# rendered table).
model_summaries <- model_summaries %>%
  mutate(
    Significant = ifelse(p_value < 0.05, "*", ""),
    p_value = format.pval(p_value, digits = 3)
  )
# Display formatted table
kable(model_summaries,
      caption = "Summary of Robust Linear Models with log()",
      digits = 3) %>%
  kable_styling(bootstrap_options = c("striped", "hover", "condensed"))
| DV | Predictor | Estimate | SE | z_value | p_value | Std_Beta | Significant | |
|---|---|---|---|---|---|---|---|---|
| (Intercept) | Total_Response_AOIs_prop_Correct_Pretest | (Intercept) | -4.014 | 0.078 | -51.571 | <2e-16 | 0.995 | NA |
| scale(MA) | Total_Response_AOIs_prop_Correct_Pretest | scale(MA) | 0.035 | 0.066 | 0.520 | 0.605 | 0.024 | |
| scale(Pre_MP) | Total_Response_AOIs_prop_Correct_Pretest | scale(Pre_MP) | 0.006 | 0.087 | 0.073 | 0.942 | 0.019 | |
| scale(MA):scale(Pre_MP) | Total_Response_AOIs_prop_Correct_Pretest | scale(MA):scale(Pre_MP) | -0.029 | 0.052 | -0.564 | 0.574 | -0.020 | |
| (Intercept)1 | Total_Hits_Timer_prop_Correct_Pretest | (Intercept) | -5.440 | 0.136 | -39.977 | <2e-16 | 0.649 | NA |
| scale(MA)1 | Total_Hits_Timer_prop_Correct_Pretest | scale(MA) | -0.011 | 0.143 | -0.078 | 0.938 | 0.019 | |
| scale(Pre_MP)1 | Total_Hits_Timer_prop_Correct_Pretest | scale(Pre_MP) | -0.169 | 0.132 | -1.279 | 0.205 | -0.081 | |
| scale(MA):scale(Pre_MP)1 | Total_Hits_Timer_prop_Correct_Pretest | scale(MA):scale(Pre_MP) | -0.014 | 0.096 | -0.145 | 0.885 | 0.028 | |
| (Intercept)2 | New_Response_AOIs_Correct_Pretest | (Intercept) | 2.523 | 0.102 | 24.798 | <2e-16 | 0.796 | NA |
| scale(MA)2 | New_Response_AOIs_Correct_Pretest | scale(MA) | 0.148 | 0.098 | 1.520 | 0.133 | 0.074 | |
| scale(Pre_MP)2 | New_Response_AOIs_Correct_Pretest | scale(Pre_MP) | -0.063 | 0.137 | -0.463 | 0.645 | -0.033 | |
| scale(MA):scale(Pre_MP)2 | New_Response_AOIs_Correct_Pretest | scale(MA):scale(Pre_MP) | 0.049 | 0.104 | 0.472 | 0.638 | 0.020 |
# Eye-tracking outcome columns for ALL pretest responses (correct and
# incorrect); redefined here for the all-response models
aoi_group_columns_pretest <- c("Total_Problem_AOIs_prop_Pretest",
"Total_Response_AOIs_prop_Pretest",
"Total_Hits_Timer_prop_Pretest",
"New_Problem_AOIs_Pretest", "New_Response_AOIs_Pretest",
"Numerator_Denominator_Transitions_Pretest",
"Total_Hits_Outside_of_Screen_prop_Pretest")
# Fit one Gaussian OLS model per DV (log-transformed), collecting tidy
# coefficient summaries into `model_summaries`.
# Fixes vs the original loop (same as the correct-only loop above):
#  - bptest() result is printed instead of silently discarded;
#  - full `coefficients` name instead of partial matching;
#  - `formula` no longer masks stats::formula(); rows preallocated.
summary_rows <- vector("list", length(aoi_group_columns_pretest))
for (i in seq_along(aoi_group_columns_pretest)) {
  dv <- aoi_group_columns_pretest[[i]]
  # Build and fit the per-DV model
  model_formula <- as.formula(paste0("log(", dv, ") ~ scale(MA) * scale(Pre_MP)"))
  model <- lm(model_formula, data = AOI_student_data_pretest)
  # Shapiro-Wilk test for residual normality
  shapiro_test <- shapiro.test(resid(model))
  print(paste("Shapiro-Wilk test for", dv, ": W =", round(shapiro_test$statistic, 3),
              "p =", format.pval(shapiro_test$p.value, digits = 3)))
  # Breusch-Pagan test; p > 0.05 -- no strong evidence of heteroscedasticity (OLS is okay)
  bp_test <- bptest(model)
  print(paste("Breusch-Pagan test for", dv, ": BP =", round(bp_test$statistic, 3),
              "p =", format.pval(bp_test$p.value, digits = 3)))
  # Extract fixed effects and standardized effect sizes
  sum_model <- summary(model)
  fixed_effects <- sum_model$coefficients
  beta <- standardize_parameters(model)
  summary_rows[[i]] <- data.frame(
    DV = dv,
    Predictor = rownames(fixed_effects),
    Estimate = round(fixed_effects[, "Estimate"], 3),
    SE = round(fixed_effects[, "Std. Error"], 3),
    t_value = round(fixed_effects[, "t value"], 3),
    p_value = fixed_effects[, "Pr(>|t|)"],
    Std_Beta = round(beta$Std_Coefficient, 3)
  )
}
# Combine the per-DV rows into the summary data frame used below
model_summaries <- do.call(rbind, summary_rows)
## [1] "Shapiro-Wilk test for Total_Problem_AOIs_prop_Pretest : W = 0.987 p = 0.47"
## [1] "Shapiro-Wilk test for Total_Response_AOIs_prop_Pretest : W = 0.871 p = 1.23e-07"
## [1] "Shapiro-Wilk test for Total_Hits_Timer_prop_Pretest : W = 0.981 p = 0.194"
## [1] "Shapiro-Wilk test for New_Problem_AOIs_Pretest : W = 0.996 p = 0.988"
## [1] "Shapiro-Wilk test for New_Response_AOIs_Pretest : W = 0.937 p = 0.000186"
## [1] "Shapiro-Wilk test for Numerator_Denominator_Transitions_Pretest : W = 0.993 p = 0.925"
## [1] "Shapiro-Wilk test for Total_Hits_Outside_of_Screen_prop_Pretest : W = 0.903 p = 3.06e-06"
# Format results.
# BUG FIX: the significance flag must be computed from the *numeric* p-value
# BEFORE format.pval() turns it into a string -- as.numeric("<2e-16") is NA,
# which produced the NA flags visible for the intercept rows in the output.
model_summaries <- model_summaries %>%
  mutate(
    Significant = ifelse(p_value < 0.05, "*", ""),
    p_value = format.pval(p_value, digits = 3)
  )
# Display formatted table
kable(model_summaries,
      caption = "Summary of Gaussian Models with log()",
      digits = 3) %>%
  kable_styling(bootstrap_options = c("striped", "hover", "condensed"))
| DV | Predictor | Estimate | SE | t_value | p_value | Std_Beta | Significant | |
|---|---|---|---|---|---|---|---|---|
| (Intercept) | Total_Problem_AOIs_prop_Pretest | (Intercept) | -1.961 | 0.033 | -59.708 | <2e-16 | 0.958 | NA |
| scale(MA) | Total_Problem_AOIs_prop_Pretest | scale(MA) | 0.018 | 0.033 | 0.542 | 0.5890 | 0.019 | |
| scale(Pre_MP) | Total_Problem_AOIs_prop_Pretest | scale(Pre_MP) | -0.001 | 0.034 | -0.015 | 0.9877 | -0.007 | |
| scale(MA):scale(Pre_MP) | Total_Problem_AOIs_prop_Pretest | scale(MA):scale(Pre_MP) | 0.023 | 0.033 | 0.686 | 0.4947 | 0.028 | |
| (Intercept)1 | Total_Response_AOIs_prop_Pretest | (Intercept) | -3.938 | 0.071 | -55.221 | <2e-16 | 1.040 | NA |
| scale(MA)1 | Total_Response_AOIs_prop_Pretest | scale(MA) | 0.033 | 0.072 | 0.454 | 0.6509 | 0.028 | |
| scale(Pre_MP)1 | Total_Response_AOIs_prop_Pretest | scale(Pre_MP) | -0.068 | 0.075 | -0.908 | 0.3663 | -0.029 | |
| scale(MA):scale(Pre_MP)1 | Total_Response_AOIs_prop_Pretest | scale(MA):scale(Pre_MP) | 0.022 | 0.071 | 0.303 | 0.7629 | 0.000 | |
| (Intercept)2 | Total_Hits_Timer_prop_Pretest | (Intercept) | -5.390 | 0.099 | -54.541 | <2e-16 | 0.676 | NA |
| scale(MA)2 | Total_Hits_Timer_prop_Pretest | scale(MA) | -0.018 | 0.100 | -0.176 | 0.8606 | 0.020 | |
| scale(Pre_MP)2 | Total_Hits_Timer_prop_Pretest | scale(Pre_MP) | -0.079 | 0.103 | -0.766 | 0.4457 | -0.005 | |
| scale(MA):scale(Pre_MP)2 | Total_Hits_Timer_prop_Pretest | scale(MA):scale(Pre_MP) | 0.031 | 0.099 | 0.315 | 0.7535 | 0.043 | |
| (Intercept)3 | New_Problem_AOIs_Pretest | (Intercept) | 4.472 | 0.067 | 67.167 | <2e-16 | 0.749 | NA |
| scale(MA)3 | New_Problem_AOIs_Pretest | scale(MA) | 0.079 | 0.067 | 1.183 | 0.2398 | 0.055 | |
| scale(Pre_MP)3 | New_Problem_AOIs_Pretest | scale(Pre_MP) | -0.104 | 0.070 | -1.491 | 0.1394 | -0.061 | |
| scale(MA):scale(Pre_MP)3 | New_Problem_AOIs_Pretest | scale(MA):scale(Pre_MP) | 0.137 | 0.067 | 2.048 | 0.0434 | 0.096 | * |
| (Intercept)4 | New_Response_AOIs_Pretest | (Intercept) | 2.650 | 0.093 | 28.457 | <2e-16 | 0.780 | NA |
| scale(MA)4 | New_Response_AOIs_Pretest | scale(MA) | 0.115 | 0.094 | 1.223 | 0.2243 | 0.053 | |
| scale(Pre_MP)4 | New_Response_AOIs_Pretest | scale(Pre_MP) | -0.146 | 0.097 | -1.501 | 0.1367 | -0.073 | |
| scale(MA):scale(Pre_MP)4 | New_Response_AOIs_Pretest | scale(MA):scale(Pre_MP) | 0.115 | 0.093 | 1.228 | 0.2226 | 0.056 | |
| (Intercept)5 | Numerator_Denominator_Transitions_Pretest | (Intercept) | 3.073 | 0.069 | 44.397 | <2e-16 | 0.768 | NA |
| scale(MA)5 | Numerator_Denominator_Transitions_Pretest | scale(MA) | 0.057 | 0.070 | 0.819 | 0.4148 | 0.041 | |
| scale(Pre_MP)5 | Numerator_Denominator_Transitions_Pretest | scale(Pre_MP) | -0.131 | 0.072 | -1.810 | 0.0736 | -0.071 | |
| scale(MA):scale(Pre_MP)5 | Numerator_Denominator_Transitions_Pretest | scale(MA):scale(Pre_MP) | 0.151 | 0.069 | 2.172 | 0.0324 | 0.097 | * |
| (Intercept)6 | Total_Hits_Outside_of_Screen_prop_Pretest | (Intercept) | -1.567 | 0.057 | -27.720 | <2e-16 | 1.136 | NA |
| scale(MA)6 | Total_Hits_Outside_of_Screen_prop_Pretest | scale(MA) | -0.103 | 0.057 | -1.799 | 0.0752 | -0.073 | |
| scale(Pre_MP)6 | Total_Hits_Outside_of_Screen_prop_Pretest | scale(Pre_MP) | -0.023 | 0.059 | -0.397 | 0.6923 | -0.014 | |
| scale(MA):scale(Pre_MP)6 | Total_Hits_Outside_of_Screen_prop_Pretest | scale(MA):scale(Pre_MP) | -0.037 | 0.057 | -0.649 | 0.5178 | -0.021 |
# Plot transitions against continuous math performance, colored by MA level.
# BUG FIX: the original added both geom_point() and geom_jitter() for the
# same data, drawing every observation twice; a single jittered layer is kept.
ggplot(AOI_student_data_pretest,
       aes(x = Pre_MP,
           y = Numerator_Denominator_Transitions_Pretest,
           color = MA_level)) +
  geom_jitter(alpha = 0.5) +
  geom_smooth(method = "lm", level = 0.95, alpha = 0.3) +
  labs(x = "Pre_MP",
       y = "Numerator_Denominator_Transitions_Pretest",
       color = "MA_level") +
  theme_minimal() +
  theme(legend.position = "bottom")
# Plot new problem AOIs against continuous math performance, colored by MA level.
# BUG FIX: geom_point() + geom_jitter() drew each observation twice;
# a single jittered layer is kept.
ggplot(AOI_student_data_pretest,
       aes(x = Pre_MP,
           y = New_Problem_AOIs_Pretest,
           color = MA_level)) +
  geom_jitter(alpha = 0.5) +
  geom_smooth(method = "lm", level = 0.95, alpha = 0.3) +
  labs(x = "Pre_MP",
       y = "New_Problem_AOIs_Pretest",
       color = "MA_level") +
  theme_minimal() +
  theme(legend.position = "bottom")
# Keep only pretest problems (Image codes starting with "P")
pretest_problem_level_AOI_math_student_data <- problem_level_AOI_math_student_data %>% filter (grepl("^P", Image))
# Keep only correctly answered trials.
# NOTE(review): Correct == "TRUE" relies on logical-to-character coercion if
# Correct is logical; filter(Correct) would be cleaner -- confirm column type.
pretest_problem_level_AOI_math_student_data_correct <- pretest_problem_level_AOI_math_student_data %>% filter (Correct == "TRUE")
# Modeling with HLM: DV = log(New_Problem_AOIs + 1e-13); the tiny pseudo-count
# keeps log() finite when the count is zero. Random intercepts for problem
# (Image), presentation order, and participant (pid). Correct trials only.
model <- lmer(log(New_Problem_AOIs + 0.0000000000001) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretest_problem_level_AOI_math_student_data_correct)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(New_Problem_AOIs + 1e-13) ~ scale(MA) * scale(Pre_MP) + (1 |
## Image) + (1 | problem_Order) + (1 | pid)
## Data: pretest_problem_level_AOI_math_student_data_correct
##
## REML criterion at convergence: 1050.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1642 -0.5817 -0.0021 0.6144 2.7987
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 0.32731 0.5721
## problem_Order (Intercept) 0.03158 0.1777
## Image (Intercept) 0.06490 0.2547
## Residual 0.19929 0.4464
## Number of obs: 634, groups: pid, 82; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 4.25305 0.11410 37.275
## scale(MA) 0.13555 0.07365 1.841
## scale(Pre_MP) -0.11717 0.06481 -1.808
## scale(MA):scale(Pre_MP) 0.05687 0.04697 1.211
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.045
## scal(Pr_MP) 0.079 0.166
## s(MA):(P_MP 0.119 0.358 -0.477
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.99761, p-value = 0.5052
# Plot new problem AOIs (correct trials) against math performance by MA level.
# BUG FIX: geom_point() + geom_jitter() drew each observation twice;
# a single jittered, semi-transparent layer is kept.
ggplot(pretest_problem_level_AOI_math_student_data_correct,
       aes(x = Pre_MP,
           y = New_Problem_AOIs,
           color = MA_level)) +
  geom_jitter(alpha = 0.3) +
  geom_smooth(method = "lm", level = 0.95, alpha = 0.3) +
  labs(x = "MP",
       y = "New_Problem_AOIs across correct",
       color = "MA_level") +
  theme_minimal() +
  theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
# Modeling with HLM: DV = log(New_Response_AOIs + 1e-13); the pseudo-count
# keeps log() finite for zero counts. Correct trials only.
model <- lmer(log(New_Response_AOIs + 0.0000000000001) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretest_problem_level_AOI_math_student_data_correct)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(New_Response_AOIs + 1e-13) ~ scale(MA) * scale(Pre_MP) +
## (1 | Image) + (1 | problem_Order) + (1 | pid)
## Data: pretest_problem_level_AOI_math_student_data_correct
##
## REML criterion at convergence: 3959.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.9601 -0.0490 0.0677 0.1768 4.2076
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 13.96770 3.7373
## problem_Order (Intercept) 0.03601 0.1898
## Image (Intercept) 0.46696 0.6833
## Residual 24.29589 4.9291
## Number of obs: 634, groups: pid, 82; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 1.09023 0.53090 2.054
## scale(MA) 0.24068 0.50977 0.472
## scale(Pre_MP) -0.40829 0.46645 -0.875
## scale(MA):scale(Pre_MP) -0.02847 0.34600 -0.082
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.065
## scal(Pr_MP) 0.087 0.170
## s(MA):(P_MP 0.187 0.326 -0.456
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.49625, p-value < 2.2e-16
# Modeling with HLM: DV = log(Numerator_Denominator_Transitions + 1e-13);
# the pseudo-count keeps log() finite for zero counts. Correct trials only.
model <- lmer(log(Numerator_Denominator_Transitions + 0.0000000000001) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretest_problem_level_AOI_math_student_data_correct)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(Numerator_Denominator_Transitions + 1e-13) ~ scale(MA) *
## scale(Pre_MP) + (1 | Image) + (1 | problem_Order) + (1 | pid)
## Data: pretest_problem_level_AOI_math_student_data_correct
##
## REML criterion at convergence: 3784.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -6.6833 -0.0203 0.1045 0.2352 1.4536
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 3.12461 1.7677
## problem_Order (Intercept) 0.08933 0.2989
## Image (Intercept) 0.51562 0.7181
## Residual 20.40991 4.5177
## Number of obs: 634, groups: pid, 82; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 2.02075 0.36041 5.607
## scale(MA) -0.21324 0.29099 -0.733
## scale(Pre_MP) -0.18198 0.28472 -0.639
## scale(MA):scale(Pre_MP) -0.07534 0.21865 -0.345
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.048
## scal(Pr_MP) 0.020 0.177
## s(MA):(P_MP 0.170 0.261 -0.419
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.35009, p-value < 2.2e-16
# Modeling with HLM: DV = log(Total_Problem_AOIs_prop + 1e-13). Correct trials
# only. The fit is singular: Image variance is estimated at 0 (see output).
model <- lmer(log(Total_Problem_AOIs_prop + 0.0000000000001) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretest_problem_level_AOI_math_student_data_correct)
## boundary (singular) fit: see help('isSingular')
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(Total_Problem_AOIs_prop + 1e-13) ~ scale(MA) * scale(Pre_MP) +
## (1 | Image) + (1 | problem_Order) + (1 | pid)
## Data: pretest_problem_level_AOI_math_student_data_correct
##
## REML criterion at convergence: 502.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.2928 -0.5404 0.0679 0.6071 2.6819
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 0.101593 0.31874
## problem_Order (Intercept) 0.002408 0.04907
## Image (Intercept) 0.000000 0.00000
## Residual 0.094124 0.30680
## Number of obs: 634, groups: pid, 82; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) -2.0193310 0.0424341 -47.587
## scale(MA) 0.0208743 0.0417368 0.500
## scale(Pre_MP) -0.0092462 0.0371760 -0.249
## scale(MA):scale(Pre_MP) 0.0003585 0.0271851 0.013
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.068
## scal(Pr_MP) 0.112 0.167
## s(MA):(P_MP 0.185 0.348 -0.470
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.98609, p-value = 9.814e-06
# Modeling with HLM: DV = log(Total_Response_AOIs_prop + 1e-13). Correct trials
# only. The fit is singular: problem_Order variance is estimated at 0 (see output).
model <- lmer(log(Total_Response_AOIs_prop + 0.0000000000001) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretest_problem_level_AOI_math_student_data_correct)
## boundary (singular) fit: see help('isSingular')
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(Total_Response_AOIs_prop + 1e-13) ~ scale(MA) * scale(Pre_MP) +
## (1 | Image) + (1 | problem_Order) + (1 | pid)
## Data: pretest_problem_level_AOI_math_student_data_correct
##
## REML criterion at convergence: 3680.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -6.0292 -0.0538 0.0684 0.1857 4.2687
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 9.0909 3.0151
## problem_Order (Intercept) 0.0000 0.0000
## Image (Intercept) 0.1375 0.3708
## Residual 15.6993 3.9622
## Number of obs: 634, groups: pid, 82; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) -5.13754 0.40901 -12.561
## scale(MA) 0.11439 0.41098 0.278
## scale(Pre_MP) -0.23804 0.37573 -0.634
## scale(MA):scale(Pre_MP) -0.07739 0.27869 -0.278
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.068
## scal(Pr_MP) 0.092 0.170
## s(MA):(P_MP 0.196 0.327 -0.457
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.50425, p-value < 2.2e-16
# Modeling with HLM: DV = log(Total_Hits_Outside_of_Screen_prop + 1e-13);
# the pseudo-count keeps log() finite at zero. Correct trials only.
model <- lmer(log(Total_Hits_Outside_of_Screen_prop + 0.0000000000001) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretest_problem_level_AOI_math_student_data_correct)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(Total_Hits_Outside_of_Screen_prop + 1e-13) ~ scale(MA) *
## scale(Pre_MP) + (1 | Image) + (1 | problem_Order) + (1 | pid)
## Data: pretest_problem_level_AOI_math_student_data_correct
##
## REML criterion at convergence: 1228.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.1265 -0.4503 0.0880 0.5186 3.5203
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 0.368546 0.60708
## problem_Order (Intercept) 0.002979 0.05458
## Image (Intercept) 0.005095 0.07138
## Residual 0.291756 0.54014
## Number of obs: 634, groups: pid, 82; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) -1.67751 0.07995 -20.983
## scale(MA) -0.12930 0.07892 -1.638
## scale(Pre_MP) -0.05464 0.06996 -0.781
## scale(MA):scale(Pre_MP) -0.02640 0.05096 -0.518
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.069
## scal(Pr_MP) 0.115 0.166
## s(MA):(P_MP 0.185 0.352 -0.473
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.94437, p-value = 1.113e-14
# Modeling with HLM: same model as above but on ALL pretest trials
# (not only correct ones). DV = log(New_Problem_AOIs + 1e-13).
model <- lmer(log(New_Problem_AOIs + 0.0000000000001) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretest_problem_level_AOI_math_student_data)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(New_Problem_AOIs + 1e-13) ~ scale(MA) * scale(Pre_MP) + (1 |
## Image) + (1 | problem_Order) + (1 | pid)
## Data: pretest_problem_level_AOI_math_student_data
##
## REML criterion at convergence: 1883.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.8112 -0.6042 0.0068 0.5869 3.7951
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 0.33398 0.5779
## problem_Order (Intercept) 0.03778 0.1944
## Image (Intercept) 0.04573 0.2139
## Residual 0.25577 0.5057
## Number of obs: 1053, groups: pid, 96; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 4.31700 0.10757 40.134
## scale(MA) 0.08034 0.06884 1.167
## scale(Pre_MP) -0.08060 0.07030 -1.147
## scale(MA):scale(Pre_MP) 0.14016 0.06780 2.067
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.020
## scal(Pr_MP) -0.069 0.411
## s(MA):(P_MP 0.276 0.054 -0.265
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.99265, p-value = 4.401e-05
# Plot new problem AOIs (all trials) against math performance by MA level.
# BUG FIX: geom_point() + geom_jitter() drew each observation twice;
# a single jittered, semi-transparent layer is kept.
ggplot(pretest_problem_level_AOI_math_student_data,
       aes(x = Pre_MP,
           y = New_Problem_AOIs,
           color = MA_level)) +
  geom_jitter(alpha = 0.3) +
  geom_smooth(method = "lm", level = 0.95, alpha = 0.3) +
  labs(x = "MP",
       y = "New_Problem_AOIs across all",
       color = "MA_level") +
  theme_minimal() +
  theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
# Modeling with HLM: DV = log(New_Response_AOIs + 1e-13), all pretest trials.
model <- lmer(log(New_Response_AOIs + 0.0000000000001) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretest_problem_level_AOI_math_student_data)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(New_Response_AOIs + 1e-13) ~ scale(MA) * scale(Pre_MP) +
## (1 | Image) + (1 | problem_Order) + (1 | pid)
## Data: pretest_problem_level_AOI_math_student_data
##
## REML criterion at convergence: 6520.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -6.0964 -0.0417 0.0765 0.2009 3.8063
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 10.2081 3.195
## problem_Order (Intercept) 0.1764 0.420
## Image (Intercept) 0.2735 0.523
## Residual 24.2650 4.926
## Number of obs: 1053, groups: pid, 96; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 1.4770 0.4450 3.320
## scale(MA) 0.1595 0.4055 0.393
## scale(Pre_MP) -0.4531 0.4151 -1.092
## scale(MA):scale(Pre_MP) 0.2904 0.3998 0.726
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.028
## scal(Pr_MP) -0.099 0.410
## s(MA):(P_MP 0.393 0.055 -0.267
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.44991, p-value < 2.2e-16
# Modeling with HLM: DV = log(Numerator_Denominator_Transitions + 1e-13),
# all pretest trials.
model <- lmer(log(Numerator_Denominator_Transitions + 0.0000000000001) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretest_problem_level_AOI_math_student_data)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(Numerator_Denominator_Transitions + 1e-13) ~ scale(MA) *
## scale(Pre_MP) + (1 | Image) + (1 | problem_Order) + (1 | pid)
## Data: pretest_problem_level_AOI_math_student_data
##
## REML criterion at convergence: 6249
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -6.8618 -0.0313 0.0900 0.2219 2.3471
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 3.76920 1.9414
## problem_Order (Intercept) 0.05533 0.2352
## Image (Intercept) 0.33178 0.5760
## Residual 19.74153 4.4431
## Number of obs: 1053, groups: pid, 96; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 2.2320 0.3229 6.912
## scale(MA) -0.1857 0.2711 -0.685
## scale(Pre_MP) -0.2146 0.2784 -0.771
## scale(MA):scale(Pre_MP) 0.2981 0.2676 1.114
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.025
## scal(Pr_MP) -0.093 0.408
## s(MA):(P_MP 0.362 0.056 -0.269
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.36228, p-value < 2.2e-16
# Modeling with HLM: DV = log(Total_Problem_AOIs_prop + 1e-13), all pretest
# trials. NOTE(review): this model drops the (1|Image) random intercept used
# in the sibling models -- presumably because the correct-only fit above was
# singular with Image variance 0; confirm this was intentional.
model <- lmer(log(Total_Problem_AOIs_prop + 0.0000000000001) ~ scale(MA) * scale(Pre_MP) + (1|problem_Order) + (1|pid), data = pretest_problem_level_AOI_math_student_data)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(Total_Problem_AOIs_prop + 1e-13) ~ scale(MA) * scale(Pre_MP) +
## (1 | problem_Order) + (1 | pid)
## Data: pretest_problem_level_AOI_math_student_data
##
## REML criterion at convergence: 914.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.5411 -0.5362 0.0516 0.6169 3.1032
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 0.087784 0.29628
## problem_Order (Intercept) 0.003421 0.05849
## Residual 0.110306 0.33212
## Number of obs: 1053, groups: pid, 96; problem_Order, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) -2.00747 0.03935 -51.018
## scale(MA) 0.01925 0.03601 0.535
## scale(Pre_MP) 0.00278 0.03680 0.076
## scale(MA):scale(Pre_MP) 0.03042 0.03548 0.858
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.028
## scal(Pr_MP) -0.099 0.411
## s(MA):(P_MP 0.395 0.054 -0.265
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.97232, p-value = 2.73e-13
# Modeling with HLM: DV = log(Total_Response_AOIs_prop + 1e-13), all pretest trials.
model <- lmer(log(Total_Response_AOIs_prop + 0.0000000000001) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretest_problem_level_AOI_math_student_data)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(Total_Response_AOIs_prop + 1e-13) ~ scale(MA) * scale(Pre_MP) +
## (1 | Image) + (1 | problem_Order) + (1 | pid)
## Data: pretest_problem_level_AOI_math_student_data
##
## REML criterion at convergence: 6065
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -6.1584 -0.0400 0.0812 0.1963 3.7212
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 6.37247 2.5244
## problem_Order (Intercept) 0.03391 0.1841
## Image (Intercept) 0.08363 0.2892
## Residual 15.87316 3.9841
## Number of obs: 1053, groups: pid, 96; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) -4.85994 0.33292 -14.598
## scale(MA) 0.08509 0.32171 0.265
## scale(Pre_MP) -0.31348 0.32938 -0.952
## scale(MA):scale(Pre_MP) 0.16518 0.31719 0.521
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.029
## scal(Pr_MP) -0.105 0.410
## s(MA):(P_MP 0.417 0.055 -0.267
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.45529, p-value < 2.2e-16
# Modeling with HLM: DV = log(Total_Hits_Outside_of_Screen_prop + 1e-13), all
# pretest trials. The fit is singular: problem_Order variance is 0 (see output).
model <- lmer(log(Total_Hits_Outside_of_Screen_prop + 0.0000000000001) ~ scale(MA) * scale(Pre_MP) + (1|Image) + (1|problem_Order) + (1|pid), data = pretest_problem_level_AOI_math_student_data)
## boundary (singular) fit: see help('isSingular')
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(Total_Hits_Outside_of_Screen_prop + 1e-13) ~ scale(MA) *
## scale(Pre_MP) + (1 | Image) + (1 | problem_Order) + (1 | pid)
## Data: pretest_problem_level_AOI_math_student_data
##
## REML criterion at convergence: 3660.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -19.9389 -0.1781 0.0871 0.2912 2.4961
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 0.486957 0.69782
## problem_Order (Intercept) 0.000000 0.00000
## Image (Intercept) 0.005621 0.07498
## Residual 1.643438 1.28197
## Number of obs: 1053, groups: pid, 96; problem_Order, 12; Image, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) -1.74376 0.09327 -18.696
## scale(MA) -0.10639 0.09176 -1.160
## scale(Pre_MP) -0.04951 0.09405 -0.526
## scale(MA):scale(Pre_MP) -0.03299 0.09051 -0.365
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) s(P_MP
## scale(MA) 0.030
## scal(Pr_MP) -0.108 0.409
## s(MA):(P_MP 0.425 0.055 -0.268
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.33001, p-value < 2.2e-16
Will the two math-anxious clusters differ from each other in executive functions?
## Rows: 124 Columns: 10
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (3): pid, handedness, bid
## dbl (7): age, STROOP.rcs.overall, TASKSWITCH.rcs.overall, BRT.rt_mean.correc...
##
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
## NAs in EF data
# Count missing values per executive-function measure
sum(is.na(student_data_EF$Inhibition))
## [1] 0
sum(is.na(student_data_EF$Flexibility))
## [1] 0
# WM (Corsi) is the only EF measure with missing data
sum(is.na(student_data_EF$WM))
## [1] 10
# Distribution of each executive-function measure; the dotted vertical
# lines mark the normative boundaries for that task.

# Working memory (Corsi block span)
p1 <- ggplot(student_data_EF, aes(x = WM)) +
  geom_histogram(bins = 20, fill = "#69b3a2", color = "white", alpha = 0.8) +
  labs(title = "WM", x = "Working Memory (Corsi Block)", y = "Number of Students") +
  coord_cartesian(xlim = c(0, 11)) +
  geom_vline(xintercept = c(5, 9), linetype = "dotted") +
  theme_minimal()

# Inhibition (Stroop)
p2 <- ggplot(student_data_EF, aes(x = Inhibition)) +
  geom_histogram(bins = 20, fill = "#404080", color = "white", alpha = 0.8) +
  labs(title = "Inhibition", x = "Inhibition (Stroop Task)", y = "Number of Students") +
  coord_cartesian(xlim = c(0, 3)) +
  geom_vline(xintercept = c(0.63, 2.28), linetype = "dotted") +
  theme_minimal()

# Shifting (task switching)
p3 <- ggplot(student_data_EF, aes(x = Flexibility)) +
  geom_histogram(bins = 20, fill = "#e07b91", color = "white", alpha = 0.8) +
  labs(title = "Shifting", x = "Shifting (Task Switch)", y = "Number of Students") +
  coord_cartesian(xlim = c(0, 3)) +
  geom_vline(xintercept = c(0.60, 2.51), linetype = "dotted") +
  theme_minimal()

library(patchwork) # for combining plots
# Arrange the three panels in a single row
p1 + p2 + p3 + plot_layout(nrow = 1)
## Warning: Removed 10 rows containing non-finite outside the scale range
## (`stat_bin()`).
# Descriptive statistics (min, mean +/- sd, max) per EF measure,
# reshaped to one row per measure and formatted for display
student_data_EF %>%
  summarise(across(c(WM, Inhibition, Flexibility),
                   list(mean = ~mean(.x, na.rm = TRUE),
                        sd = ~sd(.x, na.rm = TRUE),
                        min = ~min(.x, na.rm = TRUE),
                        max = ~max(.x, na.rm = TRUE)))) %>%
  pivot_longer(cols = everything(),
               names_to = c("Variable", "Stat"),
               names_sep = "_") %>%
  pivot_wider(names_from = Stat, values_from = value) %>%
  transmute(Variable,
            Min = sprintf("%.2f", min),
            `Mean ± SD` = sprintf("%.2f ± %.2f", mean, sd),
            Max = sprintf("%.2f", max))
## # A tibble: 3 × 4
## Variable Min `Mean ± SD` Max
## <chr> <chr> <chr> <chr>
## 1 WM 5.00 7.25 ± 0.86 9.00
## 2 Inhibition 0.52 1.75 ± 0.29 2.39
## 3 Flexibility 0.85 1.79 ± 0.45 2.79
## MA, MP and time
# Select the quantitative columns for the correlation matrix.
# NOTE: base `[` with %in% keeps the data frame's own column order,
# not the order of the c() vector.
data_quant <-
student_data_EF [, (colnames(student_data_EF) %in%
c('MA', 'Pre_MP', 'Pre_Time',
'arousal_Pretest_mean', 'arousal_PrePre', 'valence_Pretest_mean', 'valence_PrePre',
'WM', 'Inhibition', 'Flexibility'))]
# APA-style correlation matrix; also written to Descriptives.doc
apa.cor.table(
data = data_quant,
filename = "Descriptives.doc",
table.number = 1,
show.conf.interval = TRUE,
show.sig.stars = TRUE,
landscape = TRUE
)
##
##
## Table 1
##
## Means, standard deviations, and correlations with confidence intervals
##
##
## Variable M SD 1 2 3
## 1. Inhibition 1.75 0.29
##
## 2. Flexibility 1.79 0.45 .61**
## [.48, .71]
##
## 3. WM 7.25 0.86 .25** .36**
## [.07, .42] [.18, .52]
##
## 4. MA 2.61 0.95 -.05 .01 -.06
## [-.23, .13] [-.17, .20] [-.25, .13]
##
## 5. arousal_PrePre 3.97 2.18 -.09 -.07 -.04
## [-.27, .09] [-.25, .11] [-.22, .16]
##
## 6. valence_PrePre 4.99 1.87 -.13 .02 -.11
## [-.31, .05] [-.16, .20] [-.30, .08]
##
## 7. Pre_MP 0.59 0.34 .09 .10 .11
## [-.09, .27] [-.08, .28] [-.08, .30]
##
## 8. Pre_Time 38.47 18.67 -.06 -.17 -.23*
## [-.24, .12] [-.34, .02] [-.40, -.04]
##
## 9. arousal_Pretest_mean 4.91 2.05 -.05 -.11 -.04
## [-.23, .13] [-.28, .08] [-.23, .15]
##
## 10. valence_Pretest_mean 4.35 1.88 -.05 .04 -.03
## [-.23, .13] [-.14, .22] [-.22, .16]
##
## 4 5 6 7 8 9
##
##
##
##
##
##
##
##
##
##
##
## .44**
## [.28, .57]
##
## -.30** -.18*
## [-.46, -.13] [-.35, -.00]
##
## -.43** -.28** .09
## [-.57, -.27] [-.44, -.10] [-.09, .27]
##
## .17 .02 .08 -.17
## [-.01, .34] [-.17, .20] [-.10, .26] [-.34, .01]
##
## .54** .69** -.16 -.35** -.00
## [.39, .65] [.58, .77] [-.33, .02] [-.50, -.18] [-.19, .18]
##
## -.29** -.12 .64** .16 .04 -.22*
## [-.44, -.11] [-.30, .06] [.52, .74] [-.03, .33] [-.15, .22] [-.39, -.04]
##
##
## Note. M and SD are used to represent mean and standard deviation, respectively.
## Values in square brackets indicate the 95% confidence interval.
## The confidence interval is a plausible range of population correlations
## that could have caused the sample correlation (Cumming, 2014).
## * indicates p < .05. ** indicates p < .01.
##
# Descriptive stats for our sample, one row per EF measure
sample_stats <- student_data_EF %>%
  summarise(across(c(WM, Inhibition, Flexibility),
                   list(mean = ~mean(.x, na.rm = TRUE),
                        sd = ~sd(.x, na.rm = TRUE),
                        min = ~min(.x, na.rm = TRUE),
                        max = ~max(.x, na.rm = TRUE)))) %>%
  pivot_longer(cols = everything(),
               names_to = c("Variable", "Stat"),
               names_sep = "_") %>%
  pivot_wider(names_from = Stat, values_from = value) %>%
  mutate(Source = "Our sample")

# Published normative values for the same measures (manually entered)
norms <- tibble(
  Variable = c("WM", "Inhibition", "Flexibility"),
  mean = c(5.78, 1.56, 1.32),
  sd = c(0.85, 0.24, 0.35),
  min = c(5.00, 0.83, 0.70),
  max = c(9.00, 2.28, 2.51),
  Source = "Norm"
)

# Stack sample and norm rows for side-by-side plotting
combined_stats <- bind_rows(sample_stats, norms)
# Option 1 plot: dot = mean, error bar = mean +/- 1 SD, dotted line = min-max range
ggplot(combined_stats, aes(x = Variable, y = mean, color = Source, group = Source)) +
  geom_point(position = position_dodge(width = 0.5), size = 3) +
  geom_errorbar(aes(ymin = mean - sd, ymax = mean + sd),
                width = 0.15, position = position_dodge(width = 0.5)) +
  # FIX: `size` for line geoms was deprecated in ggplot2 3.4.0 (the warning
  # appears in the output below); `linewidth` is the current aesthetic.
  geom_linerange(aes(ymin = min, ymax = max),
                 position = position_dodge(width = 0.5),
                 linetype = "dotted", linewidth = 0.8) +
  labs(title = "Comparison of Sample and Norm Values for EF Measures",
       y = "Score",
       x = "Variable",
       color = "Source") +
  theme_minimal()
## Warning: Using `size` aesthetic for lines was deprecated in ggplot2 3.4.0.
## ℹ Please use `linewidth` instead.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.
# Option 2 plot: horizontal bars (mean) with SD error bars; the X markers
# show each measure's min and max.
ggplot(combined_stats, aes(y = Variable, x = mean, fill = Source)) +
  geom_col(width = 0.6, position = position_dodge(width = 0.7)) +
  geom_errorbar(aes(xmin = mean - sd, xmax = mean + sd),
                width = 0.2, position = position_dodge(width = 0.7)) +
  geom_point(aes(x = min), shape = 4, size = 2, position = position_dodge(width = 0.7)) +
  geom_point(aes(x = max), shape = 4, size = 2, position = position_dodge(width = 0.7)) +
  theme_minimal() +
  labs(title = "EF Measures: Sample vs. Norm (Mean ± SD with Min/Max)",
       x = "Score", y = "Variable", fill = "Sample type")
print ("Participants with Inhibition and Flexibility data")
## [1] "Participants with Inhibition and Flexibility data"
# Cluster-group sizes; no NA filter is needed here because Inhibition and
# Flexibility have zero missing values (verified with is.na() above).
student_data_EF %>%
group_by (pre_cluster_groups) %>%
summarise(n = n())
## # A tibble: 3 × 2
## pre_cluster_groups n
## <chr> <int>
## 1 hMP_lMA 39
## 2 hMP_mMA 45
## 3 lMP_mMA 33
print ("Participants with WM data")
## [1] "Participants with WM data"
# BUG FIX: WM has 10 missing values (see the is.na() check above), so the
# tally must exclude participants without WM data. The original repeated the
# unfiltered count and therefore reported the same group sizes as for
# Inhibition/Flexibility, contradicting the header.
student_data_EF %>%
filter (!is.na(WM)) %>%
group_by (pre_cluster_groups) %>%
summarise(n = n())
## # A tibble: 3 × 2
## pre_cluster_groups n
## <chr> <int>
## 1 hMP_lMA 39
## 2 hMP_mMA 45
## 3 lMP_mMA 33
# Executive-function outcomes compared across the MP/MA cluster groups
EF_cols <- c("WM", "Inhibition", "Flexibility")
## -- Analysis
# Run a one-way ANOVA followed by Tukey's HSD for a single outcome and
# return the tidy pairwise comparisons with the overall ANOVA p-value.
#
# outcome_var: character scalar, name of the numeric outcome column
# data:        data frame containing outcome_var and group_var
# group_var:   character scalar, name of the grouping column
# Returns a data frame with one row per pairwise comparison:
#   outcome, comparison, diff, lwr, upr, p_adj, anova_p_value
run_tukey <- function(outcome_var, data, group_var) {
  # ANOVA; the local is named aov_formula to avoid masking stats::formula
  aov_formula <- as.formula(paste(outcome_var, "~", group_var))
  aov_result <- aov(aov_formula, data = data)
  # Overall ANOVA p-value for the group effect (first row of the table).
  # The original built a full anova_df but only ever used its p_value,
  # so the unused df/sum_sq/mean_sq/F columns are dropped here.
  anova_summary <- summary(aov_result)[[1]]
  anova_p <- anova_summary$`Pr(>F)`[1]
  # Tukey's HSD pairwise comparisons for the grouping factor
  tukey_result <- TukeyHSD(aov_result)
  tukey_df <- as.data.frame(tukey_result[[group_var]])
  tukey_df$comparison <- rownames(tukey_df)
  # Tidy output
  tukey_df %>%
    transmute(
      outcome = outcome_var,
      comparison,
      diff,
      lwr,
      upr,
      p_adj = `p adj`
    ) %>%
    mutate(anova_p_value = anova_p)
}
# Run the ANOVA + Tukey pipeline for every EF outcome and stack the results
all_tukey_tests <- map_df(EF_cols,
~run_tukey(.x, data = student_data_EF, group_var = "pre_cluster_groups"))
# Display, sorted by outcome then adjusted p-value, rounded to 4 decimals
all_tukey_tests %>%
arrange(outcome, p_adj) %>%
mutate(across(c(diff, lwr, upr, p_adj, anova_p_value), ~round(., 4))) %>%
knitr::kable()
| outcome | comparison | diff | lwr | upr | p_adj | anova_p_value | |
|---|---|---|---|---|---|---|---|
| lMP_mMA-hMP_mMA…1 | Flexibility | lMP_mMA-hMP_mMA | -0.2281 | -0.4710 | 0.0149 | 0.0706 | 0.0625 |
| hMP_mMA-hMP_lMA…2 | Flexibility | hMP_mMA-hMP_lMA | 0.1703 | -0.0616 | 0.4022 | 0.1934 | 0.0625 |
| lMP_mMA-hMP_lMA…3 | Flexibility | lMP_mMA-hMP_lMA | -0.0577 | -0.3084 | 0.1930 | 0.8483 | 0.0625 |
| lMP_mMA-hMP_mMA…4 | Inhibition | lMP_mMA-hMP_mMA | -0.1003 | -0.2577 | 0.0571 | 0.2883 | 0.2865 |
| lMP_mMA-hMP_lMA…5 | Inhibition | lMP_mMA-hMP_lMA | -0.0854 | -0.2479 | 0.0770 | 0.4270 | 0.2865 |
| hMP_mMA-hMP_lMA…6 | Inhibition | hMP_mMA-hMP_lMA | 0.0149 | -0.1354 | 0.1651 | 0.9699 | 0.2865 |
| lMP_mMA-hMP_lMA…7 | WM | lMP_mMA-hMP_lMA | -0.3134 | -0.8293 | 0.2025 | 0.3221 | 0.3487 |
| lMP_mMA-hMP_mMA…8 | WM | lMP_mMA-hMP_mMA | -0.2038 | -0.6919 | 0.2844 | 0.5832 | 0.3487 |
| hMP_mMA-hMP_lMA…9 | WM | hMP_mMA-hMP_lMA | -0.1096 | -0.5756 | 0.3564 | 0.8419 | 0.3487 |
## -- Boxplots
# Create boxplot for WM
ggplot(student_data_EF,
aes(x = pre_cluster_groups, y = WM, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7, na.rm = TRUE) +
geom_jitter(width = 0.2, alpha = 0.4, na.rm = TRUE) +
theme_minimal() +
labs(x = "Group",
y = "WM",
title = "WM Comparison") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Violin
ggplot(student_data_EF,
aes(x = pre_cluster_groups, y = WM, fill = pre_cluster_groups)) +
geom_violin(trim = FALSE, alpha = 0.6) +
geom_boxplot(width = 0.1, outlier.shape = NA, color = "black") +
geom_jitter(width = 0.2, alpha = 0.3, color = "black") +
theme_minimal() +
labs(x = "Group",
y = "Working Memory (WM)",
title = "Distribution of WM by Cluster Group") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
## Warning: Removed 10 rows containing non-finite outside the scale range
## (`stat_ydensity()`).
## Warning: Removed 10 rows containing non-finite outside the scale range
## (`stat_boxplot()`).
## Warning: Removed 10 rows containing missing values or values outside the scale range
## (`geom_point()`).
# Create boxplot for Inhibition
ggplot(student_data_EF,
aes(x = pre_cluster_groups, y = Inhibition, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Inhibition",
title = "Inhibition Comparison") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Create boxplot for Flexibility
ggplot(student_data_EF,
aes(x = pre_cluster_groups, y = Flexibility, fill = pre_cluster_groups)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(x = "Group",
y = "Flexibility",
title = "Flexibility Comparison") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Violin + boxplot of Flexibility by cluster group.
# Fix: y-axis label said "Working Memory (Flexibility)" — copy-paste from
# the WM violin above; Flexibility is what is plotted here.
ggplot(student_data_EF,
aes(x = pre_cluster_groups, y = Flexibility, fill = pre_cluster_groups)) +
geom_violin(trim = FALSE, alpha = 0.6) +
geom_boxplot(width = 0.1, outlier.shape = NA, color = "black") +
geom_jitter(width = 0.2, alpha = 0.3, color = "black") +
theme_minimal() +
labs(x = "Group",
y = "Flexibility",
title = "Distribution of Flexibility by Cluster Group") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Group means of Flexibility with t-based 95% CIs.
# Fixes:
#   (1) SE previously divided by n() (full group size, NAs included) while
#       sd(..., na.rm = TRUE) dropped NAs — now both use the non-missing
#       count, so SE and CI degrees of freedom are consistent.
#   (2) Axis label and title said "Inhibition" although mean_Flexibility
#       is plotted.
summary_data <- student_data_EF %>%
group_by(pre_cluster_groups) %>%
summarise(n_obs = sum(!is.na(Flexibility)),
mean_Flexibility = mean(Flexibility, na.rm = TRUE),
se = sd(Flexibility, na.rm = TRUE) / sqrt(n_obs),
ci_lower = mean_Flexibility - qt(0.975, df = n_obs - 1) * se,
ci_upper = mean_Flexibility + qt(0.975, df = n_obs - 1) * se)
ggplot(summary_data, aes(x = pre_cluster_groups, y = mean_Flexibility, fill = pre_cluster_groups)) +
geom_col(alpha = 0.7, width = 0.5) +
geom_errorbar(aes(ymin = ci_lower, ymax = ci_upper), width = 0.2) +
theme_minimal() +
labs(x = "Group",
y = "Mean Flexibility",
title = "Mean Flexibility with 95% CI (All Responses)") +
theme(legend.position = "none") +
scale_fill_brewer(palette = "Set2")
# Function to check ANOVA assumptions for a one-way design.
#
# Args:
#   outcome_var: character scalar, name of the outcome column in `data`.
#   data:        data frame containing both columns.
#   group_var:   character scalar, name of the grouping column.
# Returns: list with `normality_p` (Shapiro-Wilk on model residuals) and
#   `levene_p` (Levene's test for homogeneity of variance).
# NOTE(review): car::leveneTest warns "group coerced to factor" when the
# grouping column is character (as seen in the output below); converting
# the column to a factor upstream would silence it.
check_anova_assumptions <- function(outcome_var, data, group_var) {
formula <- as.formula(paste(outcome_var, "~", group_var))
aov_result <- aov(formula, data = data)
residuals <- residuals(aov_result)
# Normality test (Shapiro-Wilk)
normality_test <- shapiro.test(residuals)
# Homogeneity of variance (Levene's test)
levene_test <- leveneTest(formula, data = data)
# First row of Levene's table holds the group-term p-value
list(
normality_p = normality_test$p.value,
levene_p = levene_test$`Pr(>F)`[1]
)
}
# Function to run ANOVA and Tukey HSD, returning tidy results.
# This redefinition supersedes the earlier run_tukey(): it first checks
# ANOVA assumptions and carries the assumption p-values in the output.
#
# Fixes: scalar `if` condition used the vectorized `|`; `||` is the
# correct scalar short-circuit operator (R >= 4.3 errors on length > 1
# inputs to `||`, so the intent must be scalar). Local `formula` renamed
# to avoid shadowing stats::formula.
#
# Args:
#   outcome_var: character scalar, name of the outcome column in `data`.
#   data:        data frame containing `outcome_var` and `group_var`.
#   group_var:   character scalar, name of the grouping column.
# Returns: a data frame with one row per pairwise Tukey comparison
#   (diff, lwr, upr, p_adj) plus the omnibus ANOVA p-value and the
#   Shapiro-Wilk / Levene p-values from the assumption checks.
run_tukey <- function(outcome_var, data, group_var) {
  # Check assumptions first; warn (not stop) so all outcomes still run
  assumptions <- check_anova_assumptions(outcome_var, data, group_var)
  if (assumptions$normality_p < 0.05 || assumptions$levene_p < 0.05) {
    warning(paste("Assumptions violated for", outcome_var, "- interpret results with caution."))
  }
  # ANOVA: build the one-way formula from the column names
  fml <- as.formula(paste(outcome_var, "~", group_var))
  aov_result <- aov(fml, data = data)
  # Extract ANOVA results ([[1]] is the ANOVA table for this model)
  anova_summary <- summary(aov_result)[[1]]
  anova_df <- data.frame(
    outcome = outcome_var,
    df = anova_summary$Df[1],
    sum_sq = anova_summary$`Sum Sq`[1],
    mean_sq = anova_summary$`Mean Sq`[1],
    F_value = anova_summary$`F value`[1],
    p_value = anova_summary$`Pr(>F)`[1]
  )
  # Tukey's HSD: one matrix per model term, indexed by the term's name
  tukey_result <- TukeyHSD(aov_result)
  tukey_df <- as.data.frame(tukey_result[[group_var]])
  tukey_df$comparison <- rownames(tukey_df)
  # Return tidy Tukey results with assumption checks attached
  tukey_df %>%
    transmute(
      outcome = outcome_var,
      comparison,
      diff,
      lwr,
      upr,
      p_adj = `p adj`
    ) %>%
    mutate(anova_p_value = anova_df$p_value,
           normality_p_value = assumptions$normality_p,
           levene_p_value = assumptions$levene_p)
}
# Run all tests and combine results
all_tukey_tests <- map_df(EF_cols,
~run_tukey(.x, data = student_data_EF, group_var = "pre_cluster_groups"))
## Warning in leveneTest.default(y = y, group = group, ...): group coerced to
## factor.
## Warning in run_tukey(.x, data = student_data_EF, group_var =
## "pre_cluster_groups"): Assumptions violated for WM - interpret results with
## caution.
## Warning in leveneTest.default(y = y, group = group, ...): group coerced to
## factor.
## Warning in run_tukey(.x, data = student_data_EF, group_var =
## "pre_cluster_groups"): Assumptions violated for Inhibition - interpret results
## with caution.
## Warning in leveneTest.default(y = y, group = group, ...): group coerced to
## factor.
# View results in nice format
all_tukey_tests %>%
arrange(outcome, p_adj) %>%
mutate(across(c(diff, lwr, upr, p_adj, anova_p_value, normality_p_value, levene_p_value), ~round(., 4))) %>%
knitr::kable()
| outcome | comparison | diff | lwr | upr | p_adj | anova_p_value | normality_p_value | levene_p_value | |
|---|---|---|---|---|---|---|---|---|---|
| lMP_mMA-hMP_mMA…1 | Flexibility | lMP_mMA-hMP_mMA | -0.2281 | -0.4710 | 0.0149 | 0.0706 | 0.0625 | 0.5700 | 0.0732 |
| hMP_mMA-hMP_lMA…2 | Flexibility | hMP_mMA-hMP_lMA | 0.1703 | -0.0616 | 0.4022 | 0.1934 | 0.0625 | 0.5700 | 0.0732 |
| lMP_mMA-hMP_lMA…3 | Flexibility | lMP_mMA-hMP_lMA | -0.0577 | -0.3084 | 0.1930 | 0.8483 | 0.0625 | 0.5700 | 0.0732 |
| lMP_mMA-hMP_mMA…4 | Inhibition | lMP_mMA-hMP_mMA | -0.1003 | -0.2577 | 0.0571 | 0.2883 | 0.2865 | 0.0184 | 0.6366 |
| lMP_mMA-hMP_lMA…5 | Inhibition | lMP_mMA-hMP_lMA | -0.0854 | -0.2479 | 0.0770 | 0.4270 | 0.2865 | 0.0184 | 0.6366 |
| hMP_mMA-hMP_lMA…6 | Inhibition | hMP_mMA-hMP_lMA | 0.0149 | -0.1354 | 0.1651 | 0.9699 | 0.2865 | 0.0184 | 0.6366 |
| lMP_mMA-hMP_lMA…7 | WM | lMP_mMA-hMP_lMA | -0.3134 | -0.8293 | 0.2025 | 0.3221 | 0.3487 | 0.0001 | 0.6518 |
| lMP_mMA-hMP_mMA…8 | WM | lMP_mMA-hMP_mMA | -0.2038 | -0.6919 | 0.2844 | 0.5832 | 0.3487 | 0.0001 | 0.6518 |
| hMP_mMA-hMP_lMA…9 | WM | hMP_mMA-hMP_lMA | -0.1096 | -0.5756 | 0.3564 | 0.8419 | 0.3487 | 0.0001 | 0.6518 |
# Nonparametric follow-up for WM (normality assumption violated above).
# Fix: the grouping vector must come from the SAME data frame as the
# outcome — student_data_EF, not student_data — otherwise the outcome and
# group vectors can have different lengths/row order and pairings silently
# misalign.
dunn.test(student_data_EF$WM, g=student_data_EF$pre_cluster_groups, method='bonferroni')
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 5.3039, df = 2, p-value = 0.07
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -0.772640
## | 0.6596
## |
## lMP_mMA | -2.268953 -1.600063
## | 0.0349 0.1644
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Nonparametric follow-up for Inhibition (normality assumption violated).
# Fix: grouping vector taken from student_data_EF (same data frame as the
# outcome) instead of student_data, to keep rows aligned.
dunn.test(student_data_EF$Inhibition, g=student_data_EF$pre_cluster_groups, method='bonferroni')
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 0.5611, df = 2, p-value = 0.76
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA hMP_mMA
## ---------+----------------------
## hMP_mMA | -0.464628
## | 0.9633
## |
## lMP_mMA | 0.280091 0.732592
## | 1.0000 0.6957
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Modeling with lm()
model <- lm(WM ~ scale(MA) * scale(Pre_MP), data = student_data_EF)
summary(model)
##
## Call:
## lm(formula = WM ~ scale(MA) * scale(Pre_MP), data = student_data_EF)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.3084 -0.3356 -0.1864 0.6825 1.9844
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 7.21946 0.09201 78.461 <2e-16 ***
## scale(MA) -0.01321 0.09748 -0.136 0.892
## scale(Pre_MP) 0.11490 0.09800 1.172 0.244
## scale(MA):scale(Pre_MP) -0.07290 0.09322 -0.782 0.436
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.8629 on 103 degrees of freedom
## (10 observations deleted due to missingness)
## Multiple R-squared: 0.01901, Adjusted R-squared: -0.009562
## F-statistic: 0.6653 on 3 and 103 DF, p-value: 0.5752
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.94555, p-value = 0.0002574
bptest(model) # if p > 0.05 -- no strong evidence of heteroscedasticity (OLS is okay)
##
## studentized Breusch-Pagan test
##
## data: model
## BP = 5.0527, df = 3, p-value = 0.168
# Modeling with lm_robust()
model_robust <- lm_robust(WM ~ scale(MA) * scale(Pre_MP), data = student_data_EF)
summary(model_robust)
##
## Call:
## lm_robust(formula = WM ~ scale(MA) * scale(Pre_MP), data = student_data_EF)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|) CI Lower CI Upper
## (Intercept) 7.21946 0.09702 74.4127 2.332e-91 7.0270 7.4119
## scale(MA) -0.01321 0.08228 -0.1605 8.728e-01 -0.1764 0.1500
## scale(Pre_MP) 0.11490 0.11312 1.0157 3.121e-01 -0.1095 0.3392
## scale(MA):scale(Pre_MP) -0.07290 0.08978 -0.8120 4.187e-01 -0.2510 0.1052
## DF
## (Intercept) 103
## scale(MA) 103
## scale(Pre_MP) 103
## scale(MA):scale(Pre_MP) 103
##
## Multiple R-squared: 0.01901 , Adjusted R-squared: -0.009562
## F-statistic: 0.5276 on 3 and 103 DF, p-value: 0.6643
# Plotting by continuous: WM vs. pretest math performance, by MA level.
# Fixes: (1) x aesthetic was WM (plotting WM against itself) although the
# axis label declares "Pre_MP" — TODO confirm intent with author;
# (2) alpha moved out of aes() (a constant inside aes() creates a spurious
# legend entry); (3) removed the redundant geom_jitter() layer that
# double-plotted every point on top of geom_point().
ggplot(student_data_EF, aes(x = Pre_MP, y = WM, color = MA_level)) +
geom_point(alpha = 0.7) +
geom_smooth(method = "lm", level = 0.95) +
#facet_wrap(~MA_level) +
labs(x = "Pre_MP",
y = "WM",
color = "MA_level") +
theme_minimal() +
theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 10 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning: Removed 10 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Removed 10 rows containing missing values or values outside the scale range
## (`geom_point()`).
# Modeling with lm()
model <- lm(Inhibition ~ scale(MA) * scale(Pre_MP), data = student_data_EF)
summary(model)
##
## Call:
## lm(formula = Inhibition ~ scale(MA) * scale(Pre_MP), data = student_data_EF)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.18028 -0.16671 0.01326 0.19455 0.63624
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 1.757787 0.029760 59.066 <2e-16 ***
## scale(MA) -0.003698 0.030061 -0.123 0.902
## scale(Pre_MP) 0.020068 0.031064 0.646 0.520
## scale(MA):scale(Pre_MP) 0.018116 0.029539 0.613 0.541
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.2919 on 113 degrees of freedom
## Multiple R-squared: 0.01229, Adjusted R-squared: -0.01393
## F-statistic: 0.4687 on 3 and 113 DF, p-value: 0.7047
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.97398, p-value = 0.02244
bptest(model) # if p > 0.05 -- no strong evidence of heteroscedasticity (OLS is okay)
##
## studentized Breusch-Pagan test
##
## data: model
## BP = 0.78141, df = 3, p-value = 0.8539
# Modeling with lm_robust()
model_robust <- lm_robust(Inhibition ~ scale(MA) * scale(Pre_MP), data = student_data_EF)
summary(model_robust)
##
## Call:
## lm_robust(formula = Inhibition ~ scale(MA) * scale(Pre_MP), data = student_data_EF)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|) CI Lower
## (Intercept) 1.757787 0.02987 58.8383 1.320e-86 1.69860
## scale(MA) -0.003698 0.02898 -0.1276 8.987e-01 -0.06112
## scale(Pre_MP) 0.020068 0.03391 0.5918 5.552e-01 -0.04712
## scale(MA):scale(Pre_MP) 0.018116 0.02628 0.6892 4.921e-01 -0.03396
## CI Upper DF
## (Intercept) 1.81697 113
## scale(MA) 0.05372 113
## scale(Pre_MP) 0.08725 113
## scale(MA):scale(Pre_MP) 0.07019 113
##
## Multiple R-squared: 0.01229 , Adjusted R-squared: -0.01393
## F-statistic: 0.5355 on 3 and 113 DF, p-value: 0.6589
# Plotting by continuous: Inhibition vs. pretest math performance.
# Fixes: (1) aesthetics were x = Inhibition, y = WM, but the labels declare
# x = "Pre_MP", y = "Inhibition" — aesthetics aligned to the labels (same
# pattern as the WM and Flexibility plots); TODO confirm intent with author;
# (2) alpha moved out of aes(); (3) redundant geom_jitter() removed.
ggplot(student_data_EF, aes(x = Pre_MP, y = Inhibition, color = MA_level)) +
geom_point(alpha = 0.7) +
geom_smooth(method = "lm", level = 0.95) +
#facet_wrap(~MA_level) +
labs(x = "Pre_MP",
y = "Inhibition",
color = "MA_level") +
theme_minimal() +
theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 10 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning: Removed 10 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Removed 10 rows containing missing values or values outside the scale range
## (`geom_point()`).
# Modeling with lm()
model <- lm(Flexibility ~ scale(MA) * scale(Pre_MP), data = student_data_EF)
summary(model)
##
## Call:
## lm(formula = Flexibility ~ scale(MA) * scale(Pre_MP), data = student_data_EF)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.01528 -0.36194 -0.00265 0.34426 0.97475
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 1.80189 0.04647 38.776 <2e-16 ***
## scale(MA) 0.03328 0.04694 0.709 0.480
## scale(Pre_MP) 0.05345 0.04851 1.102 0.273
## scale(MA):scale(Pre_MP) 0.02006 0.04612 0.435 0.664
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.4558 on 113 degrees of freedom
## Multiple R-squared: 0.01566, Adjusted R-squared: -0.01047
## F-statistic: 0.5993 on 3 and 113 DF, p-value: 0.6168
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.98663, p-value = 0.3032
bptest(model) # if p > 0.05 -- no strong evidence of heteroscedasticity (OLS is okay)
##
## studentized Breusch-Pagan test
##
## data: model
## BP = 5.4871, df = 3, p-value = 0.1394
# Plotting by continuous: Flexibility vs. pretest math performance.
# Fixes: (1) aesthetics were x = Flexibility, y = WM, but the labels declare
# x = "Pre_MP", y = "Flexibility" — aesthetics aligned to the labels; TODO
# confirm intent with author; (2) alpha moved out of aes(); (3) redundant
# geom_jitter() removed.
ggplot(student_data_EF, aes(x = Pre_MP, y = Flexibility, color = MA_level)) +
geom_point(alpha = 0.7) +
geom_smooth(method = "lm", level = 0.95) +
#facet_wrap(~MA_level) +
labs(x = "Pre_MP",
y = "Flexibility",
color = "MA_level") +
theme_minimal() +
theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 10 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning: Removed 10 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Removed 10 rows containing missing values or values outside the scale range
## (`geom_point()`).
student_data_EF_without_h_l <- student_data_EF %>%
filter (pre_cluster_groups %in% c("hMP_mMA", "lMP_mMA"))
# WM
## Plot time distribution by cluster
ggplot(student_data_EF_without_h_l, aes(x = Pre_Time, fill = WM_level)) +
geom_density(alpha = 0.5) +
facet_wrap(~pre_cluster_groups) +
labs(title = "Distribution of Pretest Time by cluster",
x = "Mean Time",
y = "Density")
## Warning: Groups with fewer than two data points have been dropped.
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
## Plot time distribution by cluster
ggplot(student_data_EF_without_h_l, aes(x = Pre_Time_Correct, fill = WM_level)) +
geom_density(alpha = 0.5) +
facet_wrap(~pre_cluster_groups) +
labs(title = "Distribution of Pretest Time by cluster",
x = "Mean Time",
y = "Density")
## Warning: Removed 17 rows containing non-finite outside the scale range
## (`stat_density()`).
## Warning: Groups with fewer than two data points have been dropped.
## Warning in max(ids, na.rm = TRUE): no non-missing arguments to max; returning
## -Inf
# Inhibition
## Plot time distribution by cluster
ggplot(student_data_EF_without_h_l, aes(x = Pre_Time, fill = Inhibition_level)) +
geom_density(alpha = 0.5) +
facet_wrap(~pre_cluster_groups) +
labs(title = "Distribution of Pretest Time by cluster",
x = "Mean Time",
y = "Density")
## Plot time distribution by cluster
ggplot(student_data_EF_without_h_l, aes(x = Pre_Time_Correct, fill = Inhibition_level)) +
geom_density(alpha = 0.5) +
facet_wrap(~pre_cluster_groups) +
labs(title = "Distribution of Pretest Time by cluster",
x = "Mean Time",
y = "Density")
## Warning: Removed 17 rows containing non-finite outside the scale range
## (`stat_density()`).
# Flexibility
## Plot time distribution by cluster
ggplot(student_data_EF_without_h_l, aes(x = Pre_Time, fill = Flexibility_level)) +
geom_density(alpha = 0.5) +
facet_wrap(~pre_cluster_groups) +
labs(title = "Distribution of Pretest Time by cluster",
x = "Mean Time",
y = "Density")
## Plot time distribution by cluster
ggplot(student_data_EF_without_h_l, aes(x = Pre_Time_Correct, fill = Flexibility_level)) +
geom_density(alpha = 0.5) +
facet_wrap(~pre_cluster_groups) +
labs(title = "Distribution of Pretest Time by cluster",
x = "Mean Time",
y = "Density")
## Warning: Removed 17 rows containing non-finite outside the scale range
## (`stat_density()`).
Research question: Will congruent and incongruent cues differentially affect the processing efficiency of students from different anxiety clusters?
experiment_data %>%
summarise(
Minimum = min(trialElapsedTime, na.rm = TRUE),
Mean = mean(trialElapsedTime, na.rm = TRUE),
Median = median(trialElapsedTime, na.rm = TRUE),
Maximum = max(trialElapsedTime, na.rm = TRUE),
`Standard Deviation` = sd(trialElapsedTime, na.rm = TRUE),
`Count > 3*SD` = sum(trialElapsedTime > 3 * sd(trialElapsedTime), na.rm = TRUE)
)
## # A tibble: 1 × 6
## Minimum Mean Median Maximum `Standard Deviation` `Count > 3*SD`
## <dbl> <dbl> <dbl> <dbl> <dbl> <int>
## 1 5.03 31.1 23.7 402. 25.5 62
## Time distribution
ggplot(math_data, aes(x = log(trialElapsedTime), fill = Test)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of logged response time",
x = "Response time",
y = "Density")
exp_mean_data_if_simp <- experiment_data %>%
group_by (pid, Simplification) %>%
summarise(Exp_MP = mean(Correct),
Exp_Time = mean(trialElapsedTime))
## `summarise()` has grouped output by 'pid'. You can override using the `.groups`
## argument.
# Plot correctness distribution by Simplification
ggplot(exp_mean_data_if_simp, aes(x = Exp_MP, fill = Simplification)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Experiment Correctness by Simplification",
x = "Mean Correctness",
y = "Density")
# Plot time distribution by Simplification
ggplot(exp_mean_data_if_simp, aes(x = Exp_Time, fill = Simplification)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Experiment Time by Simplification",
x = "Mean Time",
y = "Density")
exp_mean_data_if_HOO <- experiment_data %>%
group_by (pid, HOO_Position) %>%
summarise(Exp_MP = mean(Correct),
Exp_Time = mean(trialElapsedTime))
## `summarise()` has grouped output by 'pid'. You can override using the `.groups`
## argument.
# Plot correctness distribution by HOO_Position
ggplot(exp_mean_data_if_HOO, aes(x = Exp_MP, fill = HOO_Position)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Experiment Correctness by HOO_Position",
x = "Mean Correctness",
y = "Density")
# Plot time distribution by HOO_Position
ggplot(exp_mean_data_if_HOO, aes(x = Exp_Time, fill = HOO_Position)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Experiment Time by HOO_Position",
x = "Mean Time",
y = "Density")
## Student-level data
student_data_exp %>%
summarise(
min = min(Exp_Time, na.rm = TRUE),
mean = mean(Exp_Time, na.rm = TRUE),
sd = sd(Exp_Time, na.rm = TRUE),
median = median(Exp_Time, na.rm = TRUE),
max = max(Exp_Time, na.rm = TRUE),
zeros = sum(Exp_Time == 0, na.rm = TRUE) / n()
) %>%
pivot_longer(everything(),
names_to = "Stat",
values_to = "Value")
## # A tibble: 6 × 2
## Stat Value
## <chr> <dbl>
## 1 min 8.81
## 2 mean 31.0
## 3 sd 14.7
## 4 median 27.3
## 5 max 90.7
## 6 zeros 0
# Plot time distribution
ggplot(student_data_exp, aes(x = Exp_Time)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Experiment Time",
x = "Mean Time",
y = "Density")
# Plot time distribution by Simplification
## Reshape data to long format
student_data_long <- student_data_exp %>%
pivot_longer(cols = c(Exp_Time_Correct, Exp_Time_Incorrect),
names_to = "Category",
values_to = "Time")
## Create density plot
ggplot(student_data_long, aes(x = Time, fill = Category)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Experiment Times",
x = "Time",
y = "Density") +
scale_fill_manual(values = c("Exp_Time_Correct" = "green",
"Exp_Time_Incorrect" = "red"),
labels = c("Correct Responses",
"Incorrect Responses")) +
theme_minimal()
## Warning: Removed 31 rows containing non-finite outside the scale range
## (`stat_density()`).
# Plot time distribution by Simplification
ggplot(exp_mean_data_if_simp, aes(x = Exp_Time, fill = Simplification)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Experiment Time by Simplification",
x = "Mean Time",
y = "Density")
# Plot time distribution by HOO_Position
ggplot(exp_mean_data_if_HOO, aes(x = Exp_Time, fill = HOO_Position)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Experiment Time by HOO_Position",
x = "Mean Time",
y = "Density")
# Plot time distribution by cluster
ggplot(student_data_exp, aes(x = Exp_Time, fill = pre_cluster_groups)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Experiment Time by cluster",
x = "Mean Time",
y = "Density")
hist(student_data_exp$Exp_Time)
hist(log(student_data_exp$Exp_Time))
qqnorm(student_data_exp$Exp_Time)
student_data_exp %>%
summarise(
Minimum = min(Exp_Time, na.rm = TRUE),
Median = median(Exp_Time, na.rm = TRUE),
Mean = mean(Exp_Time, na.rm = TRUE),
`Standard Deviation` = sd(Exp_Time, na.rm = TRUE),
`Count > 3*SD` = sum(Exp_Time > 3 * sd(Exp_Time), na.rm = TRUE)
)
## Minimum Median Mean Standard Deviation Count > 3*SD
## 1 8.80939 27.3239 30.979 14.69739 18
student_data_exp %>%
summarise(
min = min(Exp_Time_Correct, na.rm = TRUE),
mean = mean(Exp_Time_Correct, na.rm = TRUE),
sd = sd(Exp_Time_Correct, na.rm = TRUE),
median = median(Exp_Time_Correct, na.rm = TRUE),
max = max(Exp_Time_Correct, na.rm = TRUE),
zeros = sum(Exp_Time_Correct == 0, na.rm = TRUE) / n()
) %>%
pivot_longer(everything(),
names_to = "Stat",
values_to = "Value")
## # A tibble: 6 × 2
## Stat Value
## <chr> <dbl>
## 1 min 10.4
## 2 mean 30.5
## 3 sd 17.6
## 4 median 27.3
## 5 max 157.
## 6 zeros 0
hist(student_data_exp$Exp_Time_Correct, breaks = 60)
hist(log(student_data_exp$Exp_Time_Correct), breaks = 60)
shapiro.test(log(student_data_exp$Exp_Time_Correct))
##
## Shapiro-Wilk normality test
##
## data: log(student_data_exp$Exp_Time_Correct)
## W = 0.97394, p-value = 0.05047
# Plot time distribution by cluster
ggplot(student_data_exp, aes(x = Exp_Time_Correct, fill = pre_cluster_groups)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Experiment Time in Correct Problems by cluster",
x = "Mean Time",
y = "Density")
## Warning: Removed 15 rows containing non-finite outside the scale range
## (`stat_density()`).
# Plot time distribution by cluster by condition
ggplot(student_data_exp, aes(x = Exp_Time_Correct, fill = pre_cluster_groups)) +
geom_density(alpha = 0.5) +
facet_wrap(~Condition) +
labs(title = "Distribution of Experiment Time in Correct Problems by cluster",
x = "Mean Time",
y = "Density")
## Warning: Removed 15 rows containing non-finite outside the scale range
## (`stat_density()`).
student_data_exp %>%
summarise(
Minimum = min(Exp_Time_Correct, na.rm = TRUE),
Median = median(Exp_Time_Correct, na.rm = TRUE),
Mean = mean(Exp_Time_Correct, na.rm = TRUE),
`Standard Deviation` = sd(Exp_Time_Correct, na.rm = TRUE),
`Count > 3*SD` = sum(Exp_Time_Correct > 3 * sd(Exp_Time_Correct), na.rm = TRUE)
)
## Minimum Median Mean Standard Deviation Count > 3*SD
## 1 10.39349 27.30136 30.5252 17.60776 0
student_data_exp %>%
summarise(
min = min(Time_dif_Correct, na.rm = TRUE),
mean = mean(Time_dif_Correct, na.rm = TRUE),
sd = sd(Time_dif_Correct, na.rm = TRUE),
median = median(Time_dif_Correct, na.rm = TRUE),
max = max(Time_dif_Correct, na.rm = TRUE),
zeros = sum(Time_dif_Correct == 0, na.rm = TRUE) / n()
) %>%
pivot_longer(everything(),
names_to = "Stat",
values_to = "Value")
## # A tibble: 6 × 2
## Stat Value
## <chr> <dbl>
## 1 min -37.9
## 2 mean -5.17
## 3 sd 14.2
## 4 median -4.80
## 5 max 104.
## 6 zeros 0
# Plot distribution of the change in math performance (MP_dif).
# Fix: title said "Difference in Time in Correct Problems" but the variable
# plotted is MP_dif (performance change), as the x label already indicates.
ggplot(student_data_exp, aes(x = MP_dif)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Difference in Math Performance",
x = "MP_dif",
y = "Density")
hist(student_data_exp$Time_dif_Correct, breaks = 30)
shapiro.test(student_data_exp$Time_dif_Correct)
##
## Shapiro-Wilk normality test
##
## data: student_data_exp$Time_dif_Correct
## W = 0.6241, p-value = 4.44e-14
hist(log(student_data_exp$Time_dif_Correct), breaks = 30)
## Warning in log(student_data_exp$Time_dif_Correct): NaNs produced
shapiro.test(log(student_data_exp$Time_dif_Correct))
## Warning in log(student_data_exp$Time_dif_Correct): NaNs produced
##
## Shapiro-Wilk normality test
##
## data: log(student_data_exp$Time_dif_Correct)
## W = 0.93813, p-value = 0.2967
# Plot time distribution by cluster
ggplot(student_data_exp, aes(x = Time_dif_Correct, fill = pre_cluster_groups)) +
geom_density(alpha = 0.5) +
labs(title = "Distribution of Difference in Time in Correct Problems by cluster",
x = "Mean Time",
y = "Density")
## Warning: Removed 19 rows containing non-finite outside the scale range
## (`stat_density()`).
# Plot time-difference distribution by cluster, faceted by condition.
# Fix: title said "Experiment Time" but Time_dif_Correct (the time
# difference) is plotted — now matches the unfaceted plot above.
ggplot(student_data_exp, aes(x = Time_dif_Correct, fill = pre_cluster_groups)) +
geom_density(alpha = 0.5) +
facet_wrap(~Condition) +
labs(title = "Distribution of Difference in Time in Correct Problems by cluster",
x = "Mean Time",
y = "Density")
## Warning: Removed 19 rows containing non-finite outside the scale range
## (`stat_density()`).
student_data_exp %>%
summarise(
Minimum = min(Time_dif_Correct, na.rm = TRUE),
Median = median(Time_dif_Correct, na.rm = TRUE),
Mean = mean(Time_dif_Correct, na.rm = TRUE),
`Standard Deviation` = sd(Time_dif_Correct, na.rm = TRUE),
`Count > 3*SD` = sum(Time_dif_Correct > 3 * sd(Time_dif_Correct), na.rm = TRUE)
)
## Minimum Median Mean Standard Deviation Count > 3*SD
## 1 -37.8744 -4.802705 -5.168793 14.24301 0
table(student_data_exp$pre_cluster_groups, student_data_exp$Condition)
##
## Congruent Incongruent
## hMP_lMA 26 13
## hMP_mMA 18 25
## lMP_mMA 16 14
student_data_exp %>%
group_by(pre_cluster_groups, Condition) %>%
summarise(n = n())
## `summarise()` has grouped output by 'pre_cluster_groups'. You can override
## using the `.groups` argument.
## # A tibble: 6 × 3
## # Groups: pre_cluster_groups [3]
## pre_cluster_groups Condition n
## <chr> <chr> <int>
## 1 hMP_lMA Congruent 26
## 2 hMP_lMA Incongruent 13
## 3 hMP_mMA Congruent 18
## 4 hMP_mMA Incongruent 25
## 5 lMP_mMA Congruent 16
## 6 lMP_mMA Incongruent 14
student_data_exp %>%
filter (Exp_MP > 0 & Pre_MP > 0) %>%
group_by(pre_cluster_groups, Condition) %>%
summarise(n = n())
## `summarise()` has grouped output by 'pre_cluster_groups'. You can override
## using the `.groups` argument.
## # A tibble: 6 × 3
## # Groups: pre_cluster_groups [3]
## pre_cluster_groups Condition n
## <chr> <chr> <int>
## 1 hMP_lMA Congruent 26
## 2 hMP_lMA Incongruent 13
## 3 hMP_mMA Congruent 18
## 4 hMP_mMA Incongruent 25
## 5 lMP_mMA Congruent 9
## 6 lMP_mMA Incongruent 2
student_data_exp$Composite_score_exp <- student_data_exp$Exp_MP / student_data_exp$Exp_Time
student_data_exp$Composite_score_dif <- student_data_exp$Composite_score_exp - student_data_exp$Composite_score_pre
# Create a combined group-condition factor
student_data_exp$group_condition <- interaction(student_data_exp$pre_cluster_groups, student_data_exp$Condition)
print ("with at least one correct response in pretest and experiment")
## [1] "with at least one correct response in pretest and experiment"
student_data_exp %>% filter (Exp_MP > 0 & Pre_MP > 0) %>%
group_by (pre_cluster_groups, Condition) %>%
summarise(n = n())
## `summarise()` has grouped output by 'pre_cluster_groups'. You can override
## using the `.groups` argument.
## # A tibble: 6 × 3
## # Groups: pre_cluster_groups [3]
## pre_cluster_groups Condition n
## <chr> <chr> <int>
## 1 hMP_lMA Congruent 26
## 2 hMP_lMA Incongruent 13
## 3 hMP_mMA Congruent 18
## 4 hMP_mMA Incongruent 25
## 5 lMP_mMA Congruent 9
## 6 lMP_mMA Incongruent 2
# Dunn Test
dunn_results <- dunn.test(student_data_exp$MP_dif, student_data_exp$group_condition, method = "bonferroni")
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 2.2487, df = 5, p-value = 0.81
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA. lMP_mMA.
## ---------+-------------------------------------------------------
## hMP_lMA. | -0.192519
## | 1.0000
## |
## hMP_mMA. | 0.997471 1.019963
## | 1.0000 1.0000
## |
## hMP_mMA. | 0.202964 0.357512 -0.805492
## | 1.0000 1.0000 1.0000
## |
## lMP_mMA. | 1.019148 1.042396 0.052335 0.833894
## | 1.0000 1.0000 1.0000 1.0000
## |
## lMP_mMA. | 0.165989 0.312647 -0.703866 -0.005474 -0.734511
## | 1.0000 1.0000 1.0000 1.0000 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Tidy results
dunn_df <- data.frame(
comparison = dunn_results$comparisons,
Z = round(dunn_results$Z, 4),
p_adjusted = round(dunn_results$P.adjusted, 4)
) %>% arrange(p_adjusted)
knitr::kable(dunn_df)
| comparison | Z | p_adjusted |
|---|---|---|
| hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.1925 | 1 |
| hMP_lMA.Congruent - hMP_mMA.Congruent | 0.9975 | 1 |
| hMP_lMA.Incongruent - hMP_mMA.Congruent | 1.0200 | 1 |
| hMP_lMA.Congruent - hMP_mMA.Incongruent | 0.2030 | 1 |
| hMP_lMA.Incongruent - hMP_mMA.Incongruent | 0.3575 | 1 |
| hMP_mMA.Congruent - hMP_mMA.Incongruent | -0.8055 | 1 |
| hMP_lMA.Congruent - lMP_mMA.Congruent | 1.0191 | 1 |
| hMP_lMA.Incongruent - lMP_mMA.Congruent | 1.0424 | 1 |
| hMP_mMA.Congruent - lMP_mMA.Congruent | 0.0523 | 1 |
| hMP_mMA.Incongruent - lMP_mMA.Congruent | 0.8339 | 1 |
| hMP_lMA.Congruent - lMP_mMA.Incongruent | 0.1660 | 1 |
| hMP_lMA.Incongruent - lMP_mMA.Incongruent | 0.3126 | 1 |
| hMP_mMA.Congruent - lMP_mMA.Incongruent | -0.7039 | 1 |
| hMP_mMA.Incongruent - lMP_mMA.Incongruent | -0.0055 | 1 |
| lMP_mMA.Congruent - lMP_mMA.Incongruent | -0.7345 | 1 |
student_data_exp$Condition <- factor(student_data_exp$Condition, levels = c("Congruent", "Incongruent"))
# Visualization with boxplots.
# Fix: user-facing y-axis labels had the typo "performence" in all three
# plots; corrected to "performance".
ggplot(student_data_exp, aes(x = group_condition, y = MP_dif, fill = Condition)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(title = "Comparison by performance change", x = "Group-Condition", y = "Math performance change") +
theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = "none")
# Visualization of pretest MP with boxplots
ggplot(student_data, aes(x = pre_cluster_groups, y = Pre_MP)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(title = "Comparison by performance in pretest", x = "Group-Condition", y = "Math performance in pretest") +
theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = "none")
# Visualization of experiment MP with boxplots
ggplot(student_data_exp, aes(x = group_condition, y = Exp_MP, fill = Condition)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(title = "Comparison by performance in experiment", x = "Group-Condition", y = "Math performance in experiment") +
theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = "none")
# Drop only the lMP_mMA x Incongruent cell (only 2 participants); note the
# Congruent lMP_mMA cell is kept — the original comment implied the whole group
# was removed
student_data_exp_without_LP_MA_incong <- student_data_exp %>%
filter (! (pre_cluster_groups == "lMP_mMA" & Condition == "Incongruent") )
# Dunn post-hoc test (Bonferroni-adjusted) on correct-trial time differences
# across the remaining group x condition cells
dunn_results <- dunn.test(student_data_exp_without_LP_MA_incong$Time_dif_Correct,
student_data_exp_without_LP_MA_incong$group_condition, method = "bonferroni")
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 5.0015, df = 4, p-value = 0.29
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.861637
## | 1.0000
## |
## hMP_mMA. | 0.513416 1.236643
## | 1.0000 1.0000
## |
## hMP_mMA. | 1.177138 1.820236 0.557398
## | 1.0000 0.3436 1.0000
## |
## lMP_mMA. | -0.832923 -0.067906 -1.174664 -1.676891
## | 1.0000 1.0000 1.0000 0.4678
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Tidy the dunn.test output into a table sorted by adjusted p-value
dunn_df <- with(dunn_results, data.frame(
  comparison = comparisons,
  Z = round(Z, 4),
  p_adjusted = round(P.adjusted, 4)
)) %>%
  arrange(p_adjusted)
knitr::kable(dunn_df)
| comparison | Z | p_adjusted |
|---|---|---|
| hMP_lMA.Incongruent - hMP_mMA.Incongruent | 1.8202 | 0.3436 |
| hMP_mMA.Incongruent - lMP_mMA.Congruent | -1.6769 | 0.4678 |
| hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.8616 | 1.0000 |
| hMP_lMA.Congruent - hMP_mMA.Congruent | 0.5134 | 1.0000 |
| hMP_lMA.Incongruent - hMP_mMA.Congruent | 1.2366 | 1.0000 |
| hMP_lMA.Congruent - hMP_mMA.Incongruent | 1.1771 | 1.0000 |
| hMP_mMA.Congruent - hMP_mMA.Incongruent | 0.5574 | 1.0000 |
| hMP_lMA.Congruent - lMP_mMA.Congruent | -0.8329 | 1.0000 |
| hMP_lMA.Incongruent - lMP_mMA.Congruent | -0.0679 | 1.0000 |
| hMP_mMA.Congruent - lMP_mMA.Congruent | -1.1747 | 1.0000 |
# Visualization with boxplots: correct-trial time difference per group x condition
# (the fill legend is deliberately suppressed; the warnings below show 7 rows
# with missing Time_dif_Correct are dropped by ggplot)
ggplot(student_data_exp_without_LP_MA_incong, aes(x = group_condition, y = Time_dif_Correct, fill = Condition)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(title = "Comparison by correct time difference", x = "Group-Condition", y = "Time_dif_Correct") +
theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = "none")
## Warning: Removed 7 rows containing non-finite outside the scale range
## (`stat_boxplot()`).
## Warning: Removed 7 rows containing missing values or values outside the scale range
## (`geom_point()`).
# Dunn post-hoc test (Bonferroni-adjusted) on overall time differences across
# all group x condition cells (full sample, including the small lMP_mMA cell)
dunn_results <- dunn.test(student_data_exp$Time_dif, student_data_exp$group_condition, method = "bonferroni")
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 12.3127, df = 5, p-value = 0.03
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA. lMP_mMA.
## ---------+-------------------------------------------------------
## hMP_lMA. | -1.042476
## | 1.0000
## |
## hMP_mMA. | 0.715845 1.575942
## | 1.0000 0.8628
## |
## hMP_mMA. | 0.672937 1.586850 -0.100276
## | 1.0000 0.8441 1.0000
## |
## lMP_mMA. | -0.209659 0.769947 -0.832708 -0.796843
## | 1.0000 1.0000 1.0000 1.0000
## |
## lMP_mMA. | 2.670296 3.217602 1.868117 2.087115 2.600847
## | 0.0568 0.0097* 0.4631 0.2766 0.0697
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Reshape the dunn.test list into a data frame, ordered by adjusted p-value
dunn_df <- with(dunn_results, data.frame(
  comparison = comparisons,
  Z = round(Z, 4),
  p_adjusted = round(P.adjusted, 4)
)) %>%
  arrange(p_adjusted)
knitr::kable(dunn_df)
| comparison | Z | p_adjusted |
|---|---|---|
| hMP_lMA.Incongruent - lMP_mMA.Incongruent | 3.2176 | 0.0097 |
| hMP_lMA.Congruent - lMP_mMA.Incongruent | 2.6703 | 0.0568 |
| lMP_mMA.Congruent - lMP_mMA.Incongruent | 2.6008 | 0.0697 |
| hMP_mMA.Incongruent - lMP_mMA.Incongruent | 2.0871 | 0.2766 |
| hMP_mMA.Congruent - lMP_mMA.Incongruent | 1.8681 | 0.4631 |
| hMP_lMA.Incongruent - hMP_mMA.Incongruent | 1.5869 | 0.8441 |
| hMP_lMA.Incongruent - hMP_mMA.Congruent | 1.5759 | 0.8628 |
| hMP_lMA.Congruent - hMP_lMA.Incongruent | -1.0425 | 1.0000 |
| hMP_lMA.Congruent - hMP_mMA.Congruent | 0.7158 | 1.0000 |
| hMP_lMA.Congruent - hMP_mMA.Incongruent | 0.6729 | 1.0000 |
| hMP_mMA.Congruent - hMP_mMA.Incongruent | -0.1003 | 1.0000 |
| hMP_lMA.Congruent - lMP_mMA.Congruent | -0.2097 | 1.0000 |
| hMP_lMA.Incongruent - lMP_mMA.Congruent | 0.7699 | 1.0000 |
| hMP_mMA.Congruent - lMP_mMA.Congruent | -0.8327 | 1.0000 |
| hMP_mMA.Incongruent - lMP_mMA.Congruent | -0.7968 | 1.0000 |
# Visualization with boxplots: overall time difference per group x condition
# (legend suppressed; fill still distinguishes condition within each x position)
ggplot(student_data_exp, aes(x = group_condition, y = Time_dif, fill = Condition)) +
geom_boxplot(alpha = 0.7) +
geom_jitter(width = 0.2, alpha = 0.4) +
theme_minimal() +
labs(title = "Comparison by all time difference", x = "Group-Condition", y = "Time_dif") +
theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = "none")
# Dunn post-hoc test (Bonferroni-adjusted) on the arousal change score across
# group x condition cells
dunn_results <- dunn.test(student_data_exp$arousal_Exp_dif, student_data_exp$group_condition, method = "bonferroni")
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 6.9261, df = 5, p-value = 0.23
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA. lMP_mMA.
## ---------+-------------------------------------------------------
## hMP_lMA. | -1.938750
## | 0.3940
## |
## hMP_mMA. | -1.578106 0.479917
## | 0.8591 1.0000
## |
## hMP_mMA. | -0.988811 1.115940 0.669338
## | 1.0000 1.0000 1.0000
## |
## lMP_mMA. | -0.303269 1.505644 1.127849 0.564142
## | 1.0000 0.9912 1.0000 1.0000
## |
## lMP_mMA. | 0.353218 2.013818 1.686476 1.180514 0.583263
## | 1.0000 0.3302 0.6878 1.0000 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Tidy results: pull the pairwise comparisons out of the dunn.test list and
# sort them by adjusted p-value
dunn_df <- with(dunn_results, data.frame(
  comparison = comparisons,
  Z = round(Z, 4),
  p_adjusted = round(P.adjusted, 4)
)) %>%
  arrange(p_adjusted)
knitr::kable(dunn_df)
| comparison | Z | p_adjusted |
|---|---|---|
| hMP_lMA.Incongruent - lMP_mMA.Incongruent | 2.0138 | 0.3302 |
| hMP_lMA.Congruent - hMP_lMA.Incongruent | -1.9388 | 0.3940 |
| hMP_mMA.Congruent - lMP_mMA.Incongruent | 1.6865 | 0.6878 |
| hMP_lMA.Congruent - hMP_mMA.Congruent | -1.5781 | 0.8591 |
| hMP_lMA.Incongruent - lMP_mMA.Congruent | 1.5056 | 0.9912 |
| hMP_lMA.Incongruent - hMP_mMA.Congruent | 0.4799 | 1.0000 |
| hMP_lMA.Congruent - hMP_mMA.Incongruent | -0.9888 | 1.0000 |
| hMP_lMA.Incongruent - hMP_mMA.Incongruent | 1.1159 | 1.0000 |
| hMP_mMA.Congruent - hMP_mMA.Incongruent | 0.6693 | 1.0000 |
| hMP_lMA.Congruent - lMP_mMA.Congruent | -0.3033 | 1.0000 |
| hMP_mMA.Congruent - lMP_mMA.Congruent | 1.1278 | 1.0000 |
| hMP_mMA.Incongruent - lMP_mMA.Congruent | 0.5641 | 1.0000 |
| hMP_lMA.Congruent - lMP_mMA.Incongruent | 0.3532 | 1.0000 |
| hMP_mMA.Incongruent - lMP_mMA.Incongruent | 1.1805 | 1.0000 |
| lMP_mMA.Congruent - lMP_mMA.Incongruent | 0.5833 | 1.0000 |
# Visualization with boxplots: arousal change per group x condition.
# Fix: the title previously said "Comparison by all time difference" (copy-paste
# from the Time_dif plot) although the y variable is the arousal change score.
ggplot(student_data_exp, aes(x = group_condition, y = arousal_Exp_dif, fill = pre_cluster_groups)) +
  geom_boxplot(alpha = 0.7) +
  geom_jitter(width = 0.2, alpha = 0.4) +
  theme_minimal() +
  labs(title = "Comparison by arousal change", x = "Group-Condition", y = "arousal_Exp_dif") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = "none")
# Modeling with lm_robust(): arousal change regressed on standardized MA,
# standardized pretest performance, condition, and all their interactions,
# using heteroskedasticity-robust (HC2) standard errors
model_robust <- lm_robust(arousal_Exp_dif ~ scale(MA) * scale(Pre_MP) * Condition, data = student_data_exp)
summary(model_robust)
##
## Call:
## lm_robust(formula = arousal_Exp_dif ~ scale(MA) * scale(Pre_MP) *
## Condition, data = student_data_exp)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) -0.08398 0.1855 -0.45268
## scale(MA) -0.01668 0.1936 -0.08616
## scale(Pre_MP) -0.10732 0.2283 -0.47013
## ConditionIncongruent 0.07730 0.2651 0.29154
## scale(MA):scale(Pre_MP) 0.11092 0.1713 0.64747
## scale(MA):ConditionIncongruent -0.06638 0.2695 -0.24628
## scale(Pre_MP):ConditionIncongruent 0.41348 0.2911 1.42061
## scale(MA):scale(Pre_MP):ConditionIncongruent -0.12940 0.2468 -0.52429
## Pr(>|t|) CI Lower CI Upper DF
## (Intercept) 0.6517 -0.4519 0.2839 104
## scale(MA) 0.9315 -0.4005 0.3672 104
## scale(Pre_MP) 0.6392 -0.5600 0.3454 104
## ConditionIncongruent 0.7712 -0.4485 0.6031 104
## scale(MA):scale(Pre_MP) 0.5188 -0.2288 0.4506 104
## scale(MA):ConditionIncongruent 0.8059 -0.6009 0.4681 104
## scale(Pre_MP):ConditionIncongruent 0.1584 -0.1637 0.9907 104
## scale(MA):scale(Pre_MP):ConditionIncongruent 0.6012 -0.6188 0.3600 104
##
## Multiple R-squared: 0.04918 , Adjusted R-squared: -0.01482
## F-statistic: 0.8717 on 7 and 104 DF, p-value: 0.5315
# Refit-based standardized coefficients for the robust arousal model above
standardize_parameters(model_robust)
## # Standardization method: refit
##
## Parameter | Std. Coef. | 95% CI
## -------------------------------------------------------------------------
## (Intercept) | -4.30e-03 | [-0.31, 0.31]
## scale(MA) | -0.01 | [-0.34, 0.31]
## scale(Pre_MP) | -0.09 | [-0.47, 0.29]
## ConditionIncongruent | 0.07 | [-0.38, 0.51]
## scale(MA):scale(Pre_MP) | 0.09 | [-0.19, 0.38]
## scale(MA):ConditionIncongruent | -0.06 | [-0.51, 0.39]
## scale(Pre_MP):ConditionIncongruent | 0.35 | [-0.14, 0.83]
## scale(MA):scale(Pre_MP):ConditionIncongruent | -0.11 | [-0.52, 0.30]
# Plotting: arousal change vs. pretest performance, colored by MA level and
# faceted by condition. Fixes two issues in the original:
#  - a constant alpha = 0.7 was mapped inside aes(), which creates a spurious
#    alpha legend; it is now a layer argument
#  - geom_point() plus geom_jitter() drew every observation twice; a single
#    jittered point layer is kept
ggplot(student_data_exp, aes(x = Pre_MP, y = arousal_Exp_dif, color = MA_level)) +
  geom_jitter(alpha = 0.7) +
  geom_smooth(method = "lm", level = 0.95) +
  labs(x = "Mathematical Performance",
       y = "arousal_Exp_dif",
       color = "Math Anxiety Level") +
  theme_minimal() +
  facet_wrap(.~Condition) +
  theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
# Dunn post-hoc test (Bonferroni-adjusted) on the valence change score across
# group x condition cells
dunn_results <- dunn.test(student_data_exp$valence_Exp_dif, student_data_exp$group_condition, method = "bonferroni")
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 3.6077, df = 5, p-value = 0.61
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA. lMP_mMA.
## ---------+-------------------------------------------------------
## hMP_lMA. | -0.045445
## | 1.0000
## |
## hMP_mMA. | 0.795418 0.712491
## | 1.0000 1.0000
## |
## hMP_mMA. | 1.364374 1.162811 0.447340
## | 1.0000 1.0000 1.0000
## |
## lMP_mMA. | 0.447060 0.421774 -0.296403 -0.750021
## | 1.0000 1.0000 1.0000 1.0000
## |
## lMP_mMA. | -0.442138 -0.340453 -1.095726 -1.583966 -0.788655
## | 1.0000 1.0000 1.0000 0.8490 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
# Collect the pairwise Dunn comparisons into a tidy table sorted by adjusted p
dunn_df <- with(dunn_results, data.frame(
  comparison = comparisons,
  Z = round(Z, 4),
  p_adjusted = round(P.adjusted, 4)
)) %>%
  arrange(p_adjusted)
knitr::kable(dunn_df)
| comparison | Z | p_adjusted |
|---|---|---|
| hMP_mMA.Incongruent - lMP_mMA.Incongruent | -1.5840 | 0.849 |
| hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.0454 | 1.000 |
| hMP_lMA.Congruent - hMP_mMA.Congruent | 0.7954 | 1.000 |
| hMP_lMA.Incongruent - hMP_mMA.Congruent | 0.7125 | 1.000 |
| hMP_lMA.Congruent - hMP_mMA.Incongruent | 1.3644 | 1.000 |
| hMP_lMA.Incongruent - hMP_mMA.Incongruent | 1.1628 | 1.000 |
| hMP_mMA.Congruent - hMP_mMA.Incongruent | 0.4473 | 1.000 |
| hMP_lMA.Congruent - lMP_mMA.Congruent | 0.4471 | 1.000 |
| hMP_lMA.Incongruent - lMP_mMA.Congruent | 0.4218 | 1.000 |
| hMP_mMA.Congruent - lMP_mMA.Congruent | -0.2964 | 1.000 |
| hMP_mMA.Incongruent - lMP_mMA.Congruent | -0.7500 | 1.000 |
| hMP_lMA.Congruent - lMP_mMA.Incongruent | -0.4421 | 1.000 |
| hMP_lMA.Incongruent - lMP_mMA.Incongruent | -0.3405 | 1.000 |
| hMP_mMA.Congruent - lMP_mMA.Incongruent | -1.0957 | 1.000 |
| lMP_mMA.Congruent - lMP_mMA.Incongruent | -0.7887 | 1.000 |
# Visualization with boxplots: valence change per group x condition.
# Fix: the title previously said "Comparison by all time difference" (copy-paste
# from the Time_dif plot) although the y variable is the valence change score.
ggplot(student_data_exp, aes(x = group_condition, y = valence_Exp_dif, fill = pre_cluster_groups)) +
  geom_boxplot(alpha = 0.7) +
  geom_jitter(width = 0.2, alpha = 0.4) +
  theme_minimal() +
  labs(title = "Comparison by valence change", x = "Group-Condition", y = "valence_Exp_dif") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position = "none")
# Modeling with lm_robust(): valence change regressed on standardized MA,
# standardized pretest performance, condition, and all their interactions,
# using heteroskedasticity-robust (HC2) standard errors
model_robust <- lm_robust(valence_Exp_dif ~ scale(MA) * scale(Pre_MP) * Condition, data = student_data_exp)
summary(model_robust)
##
## Call:
## lm_robust(formula = valence_Exp_dif ~ scale(MA) * scale(Pre_MP) *
## Condition, data = student_data_exp)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) -0.30258 0.2222 -1.3618
## scale(MA) 0.07661 0.2152 0.3560
## scale(Pre_MP) 0.21062 0.1943 1.0841
## ConditionIncongruent 0.06905 0.2688 0.2568
## scale(MA):scale(Pre_MP) -0.17039 0.1557 -1.0947
## scale(MA):ConditionIncongruent -0.22937 0.2432 -0.9431
## scale(Pre_MP):ConditionIncongruent -0.54219 0.2415 -2.2451
## scale(MA):scale(Pre_MP):ConditionIncongruent 0.29786 0.1896 1.5714
## Pr(>|t|) CI Lower CI Upper DF
## (Intercept) 0.17620 -0.74318 0.13803 104
## scale(MA) 0.72256 -0.35012 0.50333 104
## scale(Pre_MP) 0.28085 -0.17467 0.59591 104
## ConditionIncongruent 0.79781 -0.46408 0.60218 104
## scale(MA):scale(Pre_MP) 0.27619 -0.47906 0.13828 104
## scale(MA):ConditionIncongruent 0.34783 -0.71167 0.25293 104
## scale(Pre_MP):ConditionIncongruent 0.02688 -1.02109 -0.06328 104
## scale(MA):scale(Pre_MP):ConditionIncongruent 0.11912 -0.07802 0.67375 104
##
## Multiple R-squared: 0.04923 , Adjusted R-squared: -0.01476
## F-statistic: 1.257 on 7 and 104 DF, p-value: 0.2791
# Refit-based standardized coefficients for the robust valence model above
standardize_parameters(model_robust)
## # Standardization method: refit
##
## Parameter | Std. Coef. | 95% CI
## --------------------------------------------------------------------------
## (Intercept) | -0.04 | [-0.41, 0.33]
## scale(MA) | 0.06 | [-0.29, 0.42]
## scale(Pre_MP) | 0.18 | [-0.15, 0.50]
## ConditionIncongruent | 0.06 | [-0.39, 0.51]
## scale(MA):scale(Pre_MP) | -0.14 | [-0.40, 0.12]
## scale(MA):ConditionIncongruent | -0.19 | [-0.60, 0.21]
## scale(Pre_MP):ConditionIncongruent | -0.46 | [-0.86, -0.05]
## scale(MA):scale(Pre_MP):ConditionIncongruent | 0.25 | [-0.07, 0.57]
# Plotting: valence change vs. pretest performance, colored by MA level and
# faceted by condition. Fixes two issues in the original:
#  - a constant alpha = 0.7 was mapped inside aes(), which creates a spurious
#    alpha legend; it is now a layer argument
#  - geom_point() plus geom_jitter() drew every observation twice; a single
#    jittered point layer is kept
ggplot(student_data_exp, aes(x = Pre_MP, y = valence_Exp_dif, color = MA_level)) +
  geom_jitter(alpha = 0.7) +
  geom_smooth(method = "lm", level = 0.95) +
  labs(x = "Mathematical Performance",
       y = "valence_Exp_dif",
       color = "Math Anxiety Level") +
  theme_minimal() +
  facet_wrap(.~Condition) +
  theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
# Plotting: valence change vs. math anxiety by condition.
# Fixes: the y label previously said "valence_Exp_mean" although the plotted
# variable is valence_Exp_dif; the constant alpha moved out of aes() (no
# spurious alpha legend); the duplicate point layer (geom_point + geom_jitter)
# is collapsed into one jittered layer.
ggplot(student_data_exp, aes(x = MA, y = valence_Exp_dif, color = Condition)) +
  geom_jitter(alpha = 0.7) +
  geom_smooth(method = "lm", level = 0.95) +
  labs(x = "Mathematical anxiety",
       y = "valence_Exp_dif") +
  theme_minimal() +
  theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
##
## Call:
## lm_robust(formula = MP_dif ~ scale(MA) * scale(Pre_MP) * Condition,
## data = student_data_exp)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) 0.043280 0.03032 1.4274
## scale(MA) -0.068505 0.02557 -2.6791
## scale(Pre_MP) -0.112689 0.03693 -3.0512
## ConditionIncongruent 0.012317 0.03973 0.3101
## scale(MA):scale(Pre_MP) 0.007447 0.03662 0.2033
## scale(MA):ConditionIncongruent 0.058973 0.03235 1.8232
## scale(Pre_MP):ConditionIncongruent 0.076092 0.04519 1.6838
## scale(MA):scale(Pre_MP):ConditionIncongruent 0.007589 0.04222 0.1798
## Pr(>|t|) CI Lower CI Upper DF
## (Intercept) 0.156449 -0.01685 0.10341 104
## scale(MA) 0.008583 -0.11921 -0.01780 104
## scale(Pre_MP) 0.002892 -0.18593 -0.03945 104
## ConditionIncongruent 0.757134 -0.06646 0.09109 104
## scale(MA):scale(Pre_MP) 0.839270 -0.06518 0.08007 104
## scale(MA):ConditionIncongruent 0.071147 -0.00517 0.12312 104
## scale(Pre_MP):ConditionIncongruent 0.095228 -0.01352 0.16571 104
## scale(MA):scale(Pre_MP):ConditionIncongruent 0.857675 -0.07612 0.09130 104
##
## Multiple R-squared: 0.1355 , Adjusted R-squared: 0.07731
## F-statistic: 1.946 on 7 and 104 DF, p-value: 0.0697
## # Standardization method: refit
##
## Parameter | Std. Coef. | 95% CI
## --------------------------------------------------------------------------
## (Intercept) | -0.02 | [-0.33, 0.29]
## scale(MA) | -0.35 | [-0.62, -0.09]
## scale(Pre_MP) | -0.58 | [-0.96, -0.20]
## ConditionIncongruent | 0.06 | [-0.34, 0.47]
## scale(MA):scale(Pre_MP) | 0.04 | [-0.34, 0.41]
## scale(MA):ConditionIncongruent | 0.30 | [-0.03, 0.64]
## scale(Pre_MP):ConditionIncongruent | 0.39 | [-0.07, 0.86]
## scale(MA):scale(Pre_MP):ConditionIncongruent | 0.04 | [-0.39, 0.47]
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
##
## Call:
## lm_robust(formula = Time_dif_Correct ~ scale(MA) * scale(Pre_MP) *
## Condition, data = student_data_exp)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) -2.876 3.023 -0.9515
## scale(MA) 3.376 4.336 0.7786
## scale(Pre_MP) -3.534 2.281 -1.5494
## ConditionIncongruent -5.611 3.621 -1.5495
## scale(MA):scale(Pre_MP) -2.816 2.919 -0.9646
## scale(MA):ConditionIncongruent -6.289 5.105 -1.2321
## scale(Pre_MP):ConditionIncongruent 5.732 3.722 1.5397
## scale(MA):scale(Pre_MP):ConditionIncongruent 4.050 5.077 0.7978
## Pr(>|t|) CI Lower CI Upper DF
## (Intercept) 0.3440 -8.887 3.134 85
## scale(MA) 0.4384 -5.245 11.997 85
## scale(Pre_MP) 0.1250 -8.069 1.001 85
## ConditionIncongruent 0.1250 -12.811 1.589 85
## scale(MA):scale(Pre_MP) 0.3375 -8.619 2.988 85
## scale(MA):ConditionIncongruent 0.2213 -16.439 3.860 85
## scale(Pre_MP):ConditionIncongruent 0.1273 -1.670 13.133 85
## scale(MA):scale(Pre_MP):ConditionIncongruent 0.4272 -6.044 14.144 85
##
## Multiple R-squared: 0.1008 , Adjusted R-squared: 0.02673
## F-statistic: 1.251 on 7 and 85 DF, p-value: 0.2846
## # Standardization method: refit
##
## Parameter | Std. Coef. | 95% CI
## -------------------------------------------------------------------------
## (Intercept) | 0.04 | [-0.27, 0.36]
## scale(MA) | 0.15 | [-0.33, 0.63]
## scale(Pre_MP) | -0.14 | [-0.34, 0.05]
## ConditionIncongruent | -0.19 | [-0.56, 0.18]
## scale(MA):scale(Pre_MP) | -0.12 | [-0.37, 0.13]
## scale(MA):ConditionIncongruent | -0.31 | [-0.83, 0.20]
## scale(Pre_MP):ConditionIncongruent | 0.24 | [-0.12, 0.59]
## scale(MA):scale(Pre_MP):ConditionIncongruent | 0.18 | [-0.26, 0.61]
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 19 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning: Removed 19 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Removed 19 rows containing missing values or values outside the scale range
## (`geom_point()`).
##
## Call:
## lm_robust(formula = Time_dif ~ scale(MA) * scale(Pre_MP) * Condition,
## data = student_data_exp)
##
## Standard error type: HC2
##
## Coefficients:
## Estimate Std. Error t value
## (Intercept) -7.4023 1.575 -4.6985
## scale(MA) 0.5433 1.568 0.3466
## scale(Pre_MP) -0.2947 1.576 -0.1870
## ConditionIncongruent -3.0040 2.226 -1.3494
## scale(MA):scale(Pre_MP) -2.3945 2.056 -1.1646
## scale(MA):ConditionIncongruent -0.9967 2.197 -0.4537
## scale(Pre_MP):ConditionIncongruent 4.7186 2.369 1.9920
## scale(MA):scale(Pre_MP):ConditionIncongruent 0.7305 2.710 0.2696
## Pr(>|t|) CI Lower CI Upper DF
## (Intercept) 8.055e-06 -10.52644 -4.278 104
## scale(MA) 7.296e-01 -2.56543 3.652 104
## scale(Pre_MP) 8.521e-01 -3.42031 2.831 104
## ConditionIncongruent 1.801e-01 -7.41874 1.411 104
## scale(MA):scale(Pre_MP) 2.468e-01 -6.47169 1.683 104
## scale(MA):ConditionIncongruent 6.510e-01 -5.35363 3.360 104
## scale(Pre_MP):ConditionIncongruent 4.900e-02 0.02112 9.416 104
## scale(MA):scale(Pre_MP):ConditionIncongruent 7.880e-01 -4.64286 6.104 104
##
## Multiple R-squared: 0.1382 , Adjusted R-squared: 0.08017
## F-statistic: 1.574 on 7 and 104 DF, p-value: 0.1513
## # Standardization method: refit
##
## Parameter | Std. Coef. | 95% CI
## -------------------------------------------------------------------------
## (Intercept) | 0.05 | [-0.24, 0.34]
## scale(MA) | 0.05 | [-0.24, 0.34]
## scale(Pre_MP) | -0.03 | [-0.32, 0.26]
## ConditionIncongruent | -0.28 | [-0.69, 0.13]
## scale(MA):scale(Pre_MP) | -0.22 | [-0.60, 0.16]
## scale(MA):ConditionIncongruent | -0.09 | [-0.50, 0.31]
## scale(Pre_MP):ConditionIncongruent | 0.44 | [ 0.00, 0.87]
## scale(MA):scale(Pre_MP):ConditionIncongruent | 0.07 | [-0.43, 0.57]
## `geom_smooth()` using formula = 'y ~ x'
# Experiment-phase trials only: pretest image names start with "P", so drop them
exp_problem_level <- problem_level_math_student_data %>% filter (!grepl("^P", Image))
# Keep correct trials only.
# NOTE(review): Correct == "TRUE" compares against a string — this works if
# Correct is stored as character; if it is logical, prefer filter(Correct).
# Confirm the column type.
exp_problem_level_correct <- exp_problem_level %>% filter (Correct == "TRUE")
# Modeling with HLM: log response time on correct experiment trials, with
# crossed random intercepts for problem order and participant
model <- lmer(log(trialElapsedTime) ~ scale(MA) * scale(Pre_MP) * Condition + (1|problem_Order) + (1|pid),
data = exp_problem_level_correct)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(trialElapsedTime) ~ scale(MA) * scale(Pre_MP) * Condition +
## (1 | problem_Order) + (1 | pid)
## Data: exp_problem_level_correct
##
## REML criterion at convergence: 1360.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.4347 -0.6622 -0.0554 0.5615 4.2276
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 0.108399 0.32924
## problem_Order (Intercept) 0.003072 0.05542
## Residual 0.225528 0.47490
## Number of obs: 869, groups: pid, 97; problem_Order, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 3.182503 0.057679 55.176
## scale(MA) 0.079791 0.066316 1.203
## scale(Pre_MP) -0.072590 0.048990 -1.482
## ConditionIncongruent 0.025788 0.081314 0.317
## scale(MA):scale(Pre_MP) 0.002972 0.038561 0.077
## scale(MA):ConditionIncongruent 0.043724 0.086697 0.504
## scale(Pre_MP):ConditionIncongruent 0.042744 0.087860 0.487
## scale(MA):scale(Pre_MP):ConditionIncongruent 0.114501 0.094311 1.214
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) sc(P_MP) CndtnI sc(MA):(P_MP) s(MA):C s(P_MP):
## scale(MA) 0.255
## scal(Pr_MP) 0.156 0.267
## CndtnIncngr -0.655 -0.181 -0.111
## sc(MA):(P_MP) 0.300 0.476 -0.228 -0.212
## scl(MA):CnI -0.195 -0.765 -0.204 0.033 -0.364
## sc(P_MP):CI -0.087 -0.149 -0.557 -0.048 0.127 0.259
## s(MA):(P_MP): -0.123 -0.195 0.093 0.279 -0.409 0.019 -0.532
# Shapiro-Wilk normality check on residuals (output below: p < .001, so the
# log-time model residuals still depart from normality)
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.98543, p-value = 1.334e-07
# Correct trials from both phases.
# NOTE(review): Correct == "TRUE" is a string comparison — confirm Correct is
# character; use filter(Correct) if it is logical.
problem_level_math_student_data_correct <- problem_level_math_student_data %>% filter (Correct == "TRUE")
# The Test factor distinguishes the phases (the "TestPretest" terms in the
# output below imply the reference level is the experiment phase); the original
# comment referred to a numeric Pretest_or_Exp coding that is not used here
model <- lmer(log(trialElapsedTime) ~ scale(MA) * scale(Pre_MP) * Condition * Test + (1|problem_Order) + (1|pid),
data = problem_level_math_student_data_correct)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(trialElapsedTime) ~ scale(MA) * scale(Pre_MP) * Condition *
## Test + (1 | problem_Order) + (1 | pid)
## Data: problem_level_math_student_data_correct
##
## REML criterion at convergence: 2464.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.4622 -0.6726 -0.0724 0.5918 4.4071
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 0.11892 0.3449
## problem_Order (Intercept) 0.01289 0.1135
## Residual 0.20544 0.4533
## Number of obs: 1702, groups: pid, 104; problem_Order, 24
##
## Fixed effects:
## Estimate Std. Error
## (Intercept) 3.170480 0.065197
## scale(MA) 0.068226 0.064908
## scale(Pre_MP) -0.070876 0.044916
## ConditionIncongruent 0.032190 0.081708
## TestPretest 0.200960 0.056891
## scale(MA):scale(Pre_MP) 0.001671 0.034763
## scale(MA):ConditionIncongruent 0.073203 0.085406
## scale(Pre_MP):ConditionIncongruent 0.069441 0.076598
## scale(MA):TestPretest -0.021464 0.038828
## scale(Pre_MP):TestPretest 0.007503 0.032190
## ConditionIncongruent:TestPretest -0.004418 0.047894
## scale(MA):scale(Pre_MP):ConditionIncongruent 0.068011 0.073177
## scale(MA):scale(Pre_MP):TestPretest 0.028548 0.028136
## scale(MA):ConditionIncongruent:TestPretest 0.072111 0.050051
## scale(Pre_MP):ConditionIncongruent:TestPretest -0.026390 0.057961
## scale(MA):scale(Pre_MP):ConditionIncongruent:TestPretest -0.063697 0.057807
## t value
## (Intercept) 48.629
## scale(MA) 1.051
## scale(Pre_MP) -1.578
## ConditionIncongruent 0.394
## TestPretest 3.532
## scale(MA):scale(Pre_MP) 0.048
## scale(MA):ConditionIncongruent 0.857
## scale(Pre_MP):ConditionIncongruent 0.907
## scale(MA):TestPretest -0.553
## scale(Pre_MP):TestPretest 0.233
## ConditionIncongruent:TestPretest -0.092
## scale(MA):scale(Pre_MP):ConditionIncongruent 0.929
## scale(MA):scale(Pre_MP):TestPretest 1.015
## scale(MA):ConditionIncongruent:TestPretest 1.441
## scale(Pre_MP):ConditionIncongruent:TestPretest -0.455
## scale(MA):scale(Pre_MP):ConditionIncongruent:TestPretest -1.102
##
## Correlation matrix not shown by default, as p = 16 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
# Shapiro-Wilk normality check on residuals (output below: p < .001, residuals
# deviate from normality despite the log transform)
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.98839, p-value = 1.917e-10
# Experiment-phase trials (pretest images start with "P"); unlike the earlier
# model, incorrect trials are retained here. This recomputes the same
# exp_problem_level object defined above.
exp_problem_level <- problem_level_math_student_data %>% filter (!grepl("^P", Image))
# Modeling with HLM: log response time on all experiment trials, with crossed
# random intercepts for problem order and participant
model <- lmer(log(trialElapsedTime) ~ scale(MA) * scale(Pre_MP) * Condition + (1|problem_Order) + (1|pid),
data = exp_problem_level)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(trialElapsedTime) ~ scale(MA) * scale(Pre_MP) * Condition +
## (1 | problem_Order) + (1 | pid)
## Data: exp_problem_level
##
## REML criterion at convergence: 2171.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.1343 -0.6443 -0.0716 0.5807 3.9265
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 0.156221 0.39525
## problem_Order (Intercept) 0.004498 0.06706
## Residual 0.242556 0.49250
## Number of obs: 1333, groups: pid, 112; problem_Order, 12
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 3.22139 0.06656 48.400
## scale(MA) 0.04422 0.06901 0.641
## scale(Pre_MP) -0.06908 0.06649 -1.039
## ConditionIncongruent 0.05080 0.08930 0.569
## scale(MA):scale(Pre_MP) 0.03109 0.06109 0.509
## scale(MA):ConditionIncongruent 0.02518 0.09231 0.273
## scale(Pre_MP):ConditionIncongruent 0.05244 0.09286 0.565
## scale(MA):scale(Pre_MP):ConditionIncongruent 0.11056 0.08657 1.277
##
## Correlation of Fixed Effects:
## (Intr) sc(MA) sc(P_MP) CndtnI sc(MA):(P_MP) s(MA):C s(P_MP):
## scale(MA) 0.239
## scal(Pr_MP) 0.009 0.479
## CndtnIncngr -0.682 -0.178 -0.007
## sc(MA):(P_MP) 0.494 0.327 -0.075 -0.368
## scl(MA):CnI -0.179 -0.748 -0.358 0.084 -0.245
## sc(P_MP):CI -0.007 -0.343 -0.716 -0.073 0.054 0.409
## s(MA):(P_MP): -0.349 -0.231 0.053 0.435 -0.706 0.154 -0.235
# Shapiro-Wilk normality check on residuals (output below: p < .001)
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.98967, p-value = 4.353e-08
# All trials from both phases (correct and incorrect). The Test factor
# separates pretest from experiment (see the TestPretest terms below); the
# original comment referred to a numeric Pretest_or_Exp coding not used here.
model <- lmer(log(trialElapsedTime) ~ scale(MA) * scale(Pre_MP) * Condition * Test + (1|problem_Order) + (1|pid),
data = problem_level_math_student_data)
summary(model)
## Linear mixed model fit by REML ['lmerMod']
## Formula: log(trialElapsedTime) ~ scale(MA) * scale(Pre_MP) * Condition *
## Test + (1 | problem_Order) + (1 | pid)
## Data: problem_level_math_student_data
##
## REML criterion at convergence: 4202.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.3225 -0.6737 -0.0610 0.5960 4.0151
##
## Random effects:
## Groups Name Variance Std.Dev.
## pid (Intercept) 0.1600 0.4000
## problem_Order (Intercept) 0.0142 0.1191
## Residual 0.2326 0.4822
## Number of obs: 2732, groups: pid, 117; problem_Order, 24
##
## Fixed effects:
## Estimate Std. Error
## (Intercept) 3.206214 0.070662
## scale(MA) 0.037378 0.064724
## scale(Pre_MP) -0.058119 0.065012
## ConditionIncongruent 0.058166 0.087818
## TestPretest 0.220083 0.057120
## scale(MA):scale(Pre_MP) 0.020988 0.059994
## scale(MA):ConditionIncongruent 0.025317 0.089485
## scale(Pre_MP):ConditionIncongruent 0.047306 0.091543
## scale(MA):TestPretest -0.009045 0.032268
## scale(Pre_MP):TestPretest -0.001743 0.031313
## ConditionIncongruent:TestPretest 0.024608 0.041972
## scale(MA):scale(Pre_MP):ConditionIncongruent 0.123086 0.086253
## scale(MA):scale(Pre_MP):TestPretest 0.032373 0.028823
## scale(MA):ConditionIncongruent:TestPretest 0.048736 0.043320
## scale(Pre_MP):ConditionIncongruent:TestPretest -0.062382 0.043666
## scale(MA):scale(Pre_MP):ConditionIncongruent:TestPretest -0.016179 0.040995
## t value
## (Intercept) 45.374
## scale(MA) 0.578
## scale(Pre_MP) -0.894
## ConditionIncongruent 0.662
## TestPretest 3.853
## scale(MA):scale(Pre_MP) 0.350
## scale(MA):ConditionIncongruent 0.283
## scale(Pre_MP):ConditionIncongruent 0.517
## scale(MA):TestPretest -0.280
## scale(Pre_MP):TestPretest -0.056
## ConditionIncongruent:TestPretest 0.586
## scale(MA):scale(Pre_MP):ConditionIncongruent 1.427
## scale(MA):scale(Pre_MP):TestPretest 1.123
## scale(MA):ConditionIncongruent:TestPretest 1.125
## scale(Pre_MP):ConditionIncongruent:TestPretest -1.429
## scale(MA):scale(Pre_MP):ConditionIncongruent:TestPretest -0.395
##
## Correlation matrix not shown by default, as p = 16 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
# Shapiro-Wilk normality check on residuals (output below: p < .001)
shapiro.test(resid(model))
##
## Shapiro-Wilk normality test
##
## data: resid(model)
## W = 0.99122, p-value = 7.183e-12
# Plot: mean log response time across problem order, per condition, faceted by
# MA level. Fixes: `size` is deprecated for line geoms since ggplot2 3.4
# (use `linewidth`), and the title said "Cluster Group" although the facet
# variable is MA_level.
ggplot(problem_level_math_student_data,
       aes(x = problem_Order, y = log(trialElapsedTime), color = Condition, group = Condition)) +
  stat_summary(fun = mean, geom = "line", linewidth = 1) +
  stat_summary(fun.data = mean_se, geom = "ribbon", aes(fill = Condition), alpha = 0.2, color = NA) +
  facet_wrap(~MA_level) +
  labs(x = "Problem Order",
       y = "Log(Trial Elapsed Time)",
       title = "Time Across Problems by Condition and MA Level",
       color = "Condition",
       fill = "Condition") +
  theme_minimal()
# Define the AOI columns of interest. The "_prop" columns appear to be hit
# proportions (values within [0, 1] in the summaries below); the New_* and
# transition columns are raw counts.
aoi_group_columns <- c(
"Total_Problem_AOIs_prop", "Total_Response_AOIs_prop",
"Total_Hits_Timer_prop",
"New_Problem_AOIs", "New_Response_AOIs",
"Numerator_Denominator_Transitions",
"Total_Hits_Outside_of_Screen_prop")
# ---- Stats in congruent condition
print ("Congruent condition")
## [1] "Congruent condition"
AOI_problem_data_exp_congruent <- AOI_problem_data_exp %>% filter (Condition == "Congruent")
## Pivot the data to a longer format
long_data <- AOI_problem_data_exp_congruent %>%
pivot_longer(cols = all_of(aoi_group_columns), names_to = "Variable", values_to = "Value")
## Compute summary statistics for each variable
long_data %>%
group_by(Variable) %>%
summarise(
Minimum = min(Value, na.rm = TRUE),
Mean = mean(Value, na.rm = TRUE),
Maximum = max(Value, na.rm = TRUE),
`Standard Deviation` = sd(Value, na.rm = TRUE),
# NOTE(review): this counts values above 3*SD measured from ZERO, not from the
# mean (i.e. not abs(Value - mean(Value)) > 3*sd). Confirm this is the intended
# outlier criterion before interpreting the `Count > 3*SD` column.
`Count > 3*SD` = sum(Value > 3 * sd(Value, na.rm = TRUE), na.rm = TRUE)
)
## # A tibble: 7 × 6
## Variable Minimum Mean Maximum `Standard Deviation` `Count > 3*SD`
## <chr> <dbl> <dbl> <dbl> <dbl> <int>
## 1 New_Problem_AOIs 3 8.01e+1 6.74e+2 75.6 23
## 2 New_Response_AOIs 0 1.37e+1 1.32e+2 15.3 15
## 3 Numerator_Denomin… 0 1.94e+1 2.15e+2 21.3 14
## 4 Total_Hits_Outsid… 0 2.26e-1 4.95e-1 0.133 57
## 5 Total_Hits_Timer_… 0 6.70e-3 5.73e-2 0.00876 23
## 6 Total_Problem_AOI… 0.00954 1.48e-1 3.96e-1 0.0596 128
## 7 Total_Response_AO… 0 2.09e-2 1.03e-1 0.0156 34
# ---- Stats in incongruent condition
print ("Incongruent condition")
## [1] "Incongruent condition"
AOI_problem_data_exp_incongruent <- AOI_problem_data_exp %>% filter (Condition == "Incongruent")
## Pivot the data to a longer format
long_data <- AOI_problem_data_exp_incongruent %>%
pivot_longer(cols = all_of(aoi_group_columns), names_to = "Variable", values_to = "Value")
## Compute summary statistics for each variable
long_data %>%
group_by(Variable) %>%
summarise(
Minimum = min(Value, na.rm = TRUE),
Mean = mean(Value, na.rm = TRUE),
Maximum = max(Value, na.rm = TRUE),
`Standard Deviation` = sd(Value, na.rm = TRUE),
# NOTE(review): as in the congruent block, this threshold is 3*SD above zero,
# not above the mean — confirm the intended outlier criterion.
`Count > 3*SD` = sum(Value > 3 * sd(Value, na.rm = TRUE), na.rm = TRUE)
)
## # A tibble: 7 × 6
## Variable Minimum Mean Maximum `Standard Deviation` `Count > 3*SD`
## <chr> <dbl> <dbl> <dbl> <dbl> <int>
## 1 New_Problem_AOIs 3 8.31e+1 6.56e+2 80.3 17
## 2 New_Response_AOIs 0 1.71e+1 1.25e+2 18.0 19
## 3 Numerator_Denomin… 0 2.04e+1 1.38e+2 20.9 17
## 4 Total_Hits_Outsid… 0.00163 2.47e-1 4.94e-1 0.121 74
## 5 Total_Hits_Timer_… 0 6.09e-3 5.10e-2 0.00814 17
## 6 Total_Problem_AOI… 0.0104 1.41e-1 3.77e-1 0.0591 99
## 7 Total_Response_AO… 0 2.36e-2 8.84e-2 0.0171 29
# Separate sums of Total Hits (proportion measures on correct experiment trials)
student_total_average <- AOI_student_data_exp %>%
  select(pid,
         Total_Problem_AOIs_prop_Correct_Exp, Total_Response_AOIs_prop_Correct_Exp,
         Total_Hits_Timer_prop_Correct_Exp,
         Total_Hits_Outside_of_Screen_prop_Correct_Exp)
# Separate sums of New Hits (count measures on correct experiment trials)
student_new_average <- AOI_student_data_exp %>%
  select(pid, New_Problem_AOIs_Correct_Exp, New_Response_AOIs_Correct_Exp, Numerator_Denominator_Transitions_Correct_Exp)
# Helper: long-format per-AOI summary (mean/sd/median/max/share of zeros,
# optionally prefixed with the minimum) for a student-level AOI data frame.
# `df` must contain a `pid` column (dropped) plus numeric AOI columns;
# `type_label` is stored in a `Type` column to distinguish Total vs New hits.
summarise_aoi_stats <- function(df, type_label, include_min = FALSE) {
  stat_fns <- list(
    mean = ~mean(., na.rm = TRUE),
    sd = ~sd(., na.rm = TRUE),
    median = ~median(., na.rm = TRUE),
    max = ~max(., na.rm = TRUE),
    zeros = ~sum(. == 0, na.rm = TRUE) / n()
  )
  if (include_min) {
    # Prepend so the widened column order starts with `min`
    stat_fns <- c(list(min = ~min(., na.rm = TRUE)), stat_fns)
  }
  df %>%
    select(-pid) %>%
    summarise(across(everything(), stat_fns)) %>%
    # Result names look like "<AOI>_<stat>"; the greedy "(.*)" keeps
    # everything up to the LAST underscore as the AOI name.
    pivot_longer(everything(),
                 names_to = c("AOI", "Stat"),
                 names_pattern = "(.*)_(.*)") %>%
    mutate(Type = type_label)
}
# Create summary statistics for student-level pretest correct AOI data
# (same stats/order as before; duplicated pipelines replaced by the helper)
student_summary_stats <- bind_rows(
  summarise_aoi_stats(student_total_average, "Total_"),
  summarise_aoi_stats(student_new_average, "New_")
) %>%
  pivot_wider(names_from = Stat, values_from = value)
# Display summary statistics in a clean format
knitr::kable(student_summary_stats,
             digits = 2,
             caption = "Summary Statistics for AOI Hits") %>%
  kableExtra::kable_styling(bootstrap_options = c("striped", "hover", "condensed"))
| AOI | Type | mean | sd | median | max | zeros |
|---|---|---|---|---|---|---|
| Total_Problem_AOIs_prop_Correct_Exp | Total_ | 0.14 | 0.05 | 0.13 | 0.26 | 0.00 |
| Total_Response_AOIs_prop_Correct_Exp | Total_ | 0.02 | 0.01 | 0.02 | 0.09 | 0.00 |
| Total_Hits_Timer_prop_Correct_Exp | Total_ | 0.01 | 0.01 | 0.00 | 0.03 | 0.03 |
| Total_Hits_Outside_of_Screen_prop_Correct_Exp | Total_ | 0.24 | 0.11 | 0.25 | 0.44 | 0.00 |
| New_Problem_AOIs_Correct_Exp | New_ | 78.88 | 56.17 | 62.82 | 283.67 | 0.00 |
| New_Response_AOIs_Correct_Exp | New_ | 14.91 | 11.62 | 11.18 | 55.33 | 0.00 |
| Numerator_Denominator_Transitions_Correct_Exp | New_ | 19.03 | 14.45 | 16.17 | 76.67 | 0.00 |
# Select relevant columns for total and new hits
selected_data_total <- AOI_student_data_exp %>%
  select(
    Total_Problem_AOIs_prop_Correct_Exp,
    Total_Response_AOIs_prop_Correct_Exp,
    Total_Hits_Timer_prop_Correct_Exp,
    Total_Hits_Outside_of_Screen_prop_Correct_Exp
  )
selected_data_new <- AOI_student_data_exp %>%
  select(
    New_Problem_AOIs_Correct_Exp,
    New_Response_AOIs_Correct_Exp,
    Numerator_Denominator_Transitions_Correct_Exp
  )
# Long format for faceted histograms: one (AOI, Hits) row per observation
melted_data_total <- selected_data_total %>%
  pivot_longer(everything(), names_to = "AOI", values_to = "Hits")
melted_data_new <- selected_data_new %>%
  pivot_longer(everything(), names_to = "AOI", values_to = "Hits")
# Plot for Total Hits: per-AOI histograms with free scales.
# Passing `binwidth` as a function makes ggplot2 evaluate it on each panel's
# (finite) data, so every AOI facet gets roughly 30 bins over its own range.
ggplot(melted_data_total, aes(x = Hits)) +
  geom_histogram(
    binwidth = function(v) diff(range(v)) / 30,
    fill = '#69b3a2',
    color = 'white',
    alpha = 0.7
  ) +
  facet_wrap(~ AOI, scales = 'free', ncol = 3) +
  labs(
    title = 'Distribution of Total Hits Across AOIs',
    x = 'Number of Hits',
    y = 'Frequency'
  ) +
  theme_minimal() +
  theme(
    strip.text = element_text(size = 8),
    axis.text.x = element_text(angle = 45, hjust = 1)
  )
## Warning: Removed 44 rows containing non-finite outside the scale range
## (`stat_bin()`).
# Plot for New Hits: per-AOI histograms with free scales.
# Passing `binwidth` as a function makes ggplot2 evaluate it on each panel's
# (finite) data, so every AOI facet gets roughly 30 bins over its own range.
ggplot(melted_data_new, aes(x = Hits)) +
  geom_histogram(
    binwidth = function(v) diff(range(v)) / 30,
    fill = '#404080',
    color = 'white',
    alpha = 0.7
  ) +
  facet_wrap(~ AOI, scales = 'free', ncol = 3) +
  labs(
    title = 'Distribution of New Hits Across AOIs',
    x = 'Number of Hits',
    y = 'Frequency'
  ) +
  theme_minimal() +
  theme(
    strip.text = element_text(size = 8),
    axis.text.x = element_text(angle = 45, hjust = 1)
  )
## Warning: Removed 33 rows containing non-finite outside the scale range
## (`stat_bin()`).
# Separate sums of Total Hits (`_dif` difference-score proportion measures;
# sign convention of the difference is not visible here)
student_total_average <- AOI_student_data_exp %>%
  select(pid, Total_Problem_AOIs_prop_Correct_dif, Total_Response_AOIs_prop_Correct_dif,
         Total_Hits_Timer_prop_Correct_dif,
         Total_Hits_Outside_of_Screen_prop_Correct_dif)
# Separate sums of New Hits (`_dif` difference-score count measures)
student_new_average <- AOI_student_data_exp %>%
  select(pid, New_Problem_AOIs_Correct_dif, New_Response_AOIs_Correct_dif, Numerator_Denominator_Transitions_Correct_dif)
# Helper: long-format per-AOI summary (mean/sd/median/max/share of zeros,
# optionally prefixed with the minimum) for a student-level AOI data frame.
# `df` must contain a `pid` column (dropped) plus numeric AOI columns;
# `type_label` is stored in a `Type` column to distinguish Total vs New hits.
summarise_aoi_stats <- function(df, type_label, include_min = FALSE) {
  stat_fns <- list(
    mean = ~mean(., na.rm = TRUE),
    sd = ~sd(., na.rm = TRUE),
    median = ~median(., na.rm = TRUE),
    max = ~max(., na.rm = TRUE),
    zeros = ~sum(. == 0, na.rm = TRUE) / n()
  )
  if (include_min) {
    # Prepend so the widened column order starts with `min`
    stat_fns <- c(list(min = ~min(., na.rm = TRUE)), stat_fns)
  }
  df %>%
    select(-pid) %>%
    summarise(across(everything(), stat_fns)) %>%
    # Result names look like "<AOI>_<stat>"; the greedy "(.*)" keeps
    # everything up to the LAST underscore as the AOI name.
    pivot_longer(everything(),
                 names_to = c("AOI", "Stat"),
                 names_pattern = "(.*)_(.*)") %>%
    mutate(Type = type_label)
}
# Create summary statistics for the difference scores; this version also
# reports the minimum (include_min = TRUE), matching the original stat set.
student_summary_stats <- bind_rows(
  summarise_aoi_stats(student_total_average, "Total_", include_min = TRUE),
  summarise_aoi_stats(student_new_average, "New_", include_min = TRUE)
) %>%
  pivot_wider(names_from = Stat, values_from = value)
# Display summary statistics in a clean format
knitr::kable(student_summary_stats,
             digits = 2,
             caption = "Summary Statistics for AOI Hits") %>%
  kableExtra::kable_styling(bootstrap_options = c("striped", "hover", "condensed"))
| AOI | Type | min | mean | sd | median | max | zeros |
|---|---|---|---|---|---|---|---|
| Total_Problem_AOIs_prop_Correct_dif | Total_ | -0.11 | -0.01 | 0.03 | -0.01 | 0.05 | 0 |
| Total_Response_AOIs_prop_Correct_dif | Total_ | -0.02 | 0.00 | 0.01 | 0.00 | 0.06 | 0 |
| Total_Hits_Timer_prop_Correct_dif | Total_ | -0.02 | 0.00 | 0.00 | 0.00 | 0.01 | 0 |
| Total_Hits_Outside_of_Screen_prop_Correct_dif | Total_ | -0.30 | 0.01 | 0.08 | 0.01 | 0.19 | 0 |
| New_Problem_AOIs_Correct_dif | New_ | -98.42 | -19.09 | 36.45 | -15.19 | 170.40 | 0 |
| New_Response_AOIs_Correct_dif | New_ | -39.44 | -1.01 | 9.38 | -2.04 | 35.93 | 0 |
| Numerator_Denominator_Transitions_Correct_dif | New_ | -37.83 | -5.57 | 9.90 | -5.05 | 36.87 | 0 |
## Warning: Removed 56 rows containing non-finite outside the scale range
## (`stat_bin()`).
## Warning: Removed 42 rows containing non-finite outside the scale range
## (`stat_bin()`).
# Cross-tabulate pretest cluster groups by condition, first for everyone
print("all participants")
## [1] "all participants"
table(AOI_student_data_exp$pre_cluster_groups, AOI_student_data_exp$Condition)
# ...then restricted to students with at least one correct response in
# both the pretest and the experiment phase
print("with at least one correct response in pretest and experiment")
## [1] "with at least one correct response in pretest and experiment"
AOI_student_data_exp %>%
  filter(Exp_MP > 0, Pre_MP > 0) %>%
  group_by(pre_cluster_groups, Condition) %>%
  summarise(n = n())
## `summarise()` has grouped output by 'pre_cluster_groups'. You can override
## using the `.groups` argument.
## # A tibble: 6 × 3
## # Groups: pre_cluster_groups [3]
## pre_cluster_groups Condition n
## <chr> <fct> <int>
## 1 hMP_lMA Congruent 16
## 2 hMP_lMA Incongruent 9
## 3 hMP_mMA Congruent 12
## 4 hMP_mMA Incongruent 18
## 5 lMP_mMA Congruent 8
## 6 lMP_mMA Incongruent 1
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 1.2004, df = 4, p-value = 0.88
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | 0.356422
## | 1.0000
## |
## hMP_mMA. | 0.964285 0.498308
## | 1.0000 1.0000
## |
## hMP_mMA. | 0.132314 -0.252413 -0.866111
## | 1.0000 1.0000 1.0000
## |
## lMP_mMA. | 0.582695 0.213629 -0.253986 0.486805
## | 1.0000 1.0000 1.0000 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Warning: Removed 7 rows containing non-finite outside the scale range
## (`stat_boxplot()`).
## Warning: Removed 7 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 4.1419, df = 4, p-value = 0.39
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.093651
## | 1.0000
## |
## hMP_mMA. | -0.443452 -0.295548
## | 1.0000 1.0000
## |
## hMP_mMA. | -0.722214 -0.512250 -0.211445
## | 1.0000 1.0000 1.0000
## |
## lMP_mMA. | -1.929193 -1.638861 -1.459174 -1.381955
## | 0.2685 0.5062 0.7226 0.8349
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Warning: Removed 7 rows containing non-finite outside the scale range
## (`stat_boxplot()`).
## Removed 7 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 12.6814, df = 4, p-value = 0.01
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.787402
## | 1.0000
## |
## hMP_mMA. | 0.160714 0.883208
## | 1.0000 1.0000
## |
## hMP_mMA. | 0.871067 1.536752 0.638401
## | 1.0000 0.6218 1.0000
## |
## lMP_mMA. | 2.929224 3.285520 2.644443 2.280672
## | 0.0170* 0.0051* 0.0409 0.1128
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Warning: Removed 7 rows containing non-finite outside the scale range
## (`stat_boxplot()`).
## Removed 7 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 3.2706, df = 4, p-value = 0.51
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.737394
## | 1.0000
## |
## hMP_mMA. | 0.389880 1.034419
## | 1.0000 1.0000
## |
## hMP_mMA. | 0.922890 1.529328 0.451353
## | 1.0000 0.6309 1.0000
## |
## lMP_mMA. | -0.559072 0.134102 -0.856580 -1.315978
## | 1.0000 1.0000 1.0000 0.9409
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Warning: Removed 7 rows containing non-finite outside the scale range
## (`stat_boxplot()`).
## Removed 7 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 3.467, df = 4, p-value = 0.48
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.662836
## | 1.0000
## |
## hMP_mMA. | -0.818452 -0.082478
## | 1.0000 1.0000
## |
## hMP_mMA. | -0.274551 0.445435 0.585540
## | 1.0000 1.0000 1.0000
## |
## lMP_mMA. | -1.740211 -0.982381 -0.966143 -1.551356
## | 0.4091 1.0000 1.0000 0.6041
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Warning: Removed 7 rows containing non-finite outside the scale range
## (`stat_boxplot()`).
## Removed 7 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 1.3543, df = 4, p-value = 0.85
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.609198
## | 1.0000
## |
## hMP_mMA. | -0.285717 0.328200
## | 1.0000 1.0000
## |
## hMP_mMA. | 0.491773 1.035649 0.746165
## | 1.0000 1.0000 1.0000
## |
## lMP_mMA. | 0.283476 0.774999 0.507978 -0.108774
## | 1.0000 1.0000 1.0000 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Warning: Removed 7 rows containing non-finite outside the scale range
## (`stat_boxplot()`).
## Removed 7 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 4.3453, df = 4, p-value = 0.36
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.679203
## | 1.0000
## |
## hMP_mMA. | -1.776785 -0.896954
## | 0.3780 1.0000
## |
## hMP_mMA. | -0.012128 0.683000 1.809481
## | 1.0000 1.0000 0.3519
## |
## lMP_mMA. | -0.055119 0.533292 1.434274 -0.046362
## | 1.0000 1.0000 0.7575 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Warning: Removed 7 rows containing non-finite outside the scale range
## (`stat_boxplot()`).
## Removed 7 rows containing missing values or values outside the scale range
## (`geom_point()`).
| outcome | comparison | Z | p_adjusted |
|---|---|---|---|
| Total_Problem_AOIs_prop_Correct_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | 0.3564 | 1.0000 |
| Total_Problem_AOIs_prop_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | 0.9643 | 1.0000 |
| Total_Problem_AOIs_prop_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | 0.4983 | 1.0000 |
| Total_Problem_AOIs_prop_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | 0.1323 | 1.0000 |
| Total_Problem_AOIs_prop_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | -0.2524 | 1.0000 |
| Total_Problem_AOIs_prop_Correct_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | -0.8661 | 1.0000 |
| Total_Problem_AOIs_prop_Correct_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | 0.5827 | 1.0000 |
| Total_Problem_AOIs_prop_Correct_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | 0.2136 | 1.0000 |
| Total_Problem_AOIs_prop_Correct_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | -0.2540 | 1.0000 |
| Total_Problem_AOIs_prop_Correct_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | 0.4868 | 1.0000 |
| Total_Response_AOIs_prop_Correct_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | -1.9292 | 0.2685 |
| Total_Response_AOIs_prop_Correct_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | -1.6389 | 0.5062 |
| Total_Response_AOIs_prop_Correct_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | -1.4592 | 0.7226 |
| Total_Response_AOIs_prop_Correct_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | -1.3820 | 0.8349 |
| Total_Response_AOIs_prop_Correct_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.0937 | 1.0000 |
| Total_Response_AOIs_prop_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | -0.4435 | 1.0000 |
| Total_Response_AOIs_prop_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | -0.2955 | 1.0000 |
| Total_Response_AOIs_prop_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | -0.7222 | 1.0000 |
| Total_Response_AOIs_prop_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | -0.5123 | 1.0000 |
| Total_Response_AOIs_prop_Correct_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | -0.2114 | 1.0000 |
| Total_Hits_Timer_prop_Correct_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | 3.2855 | 0.0051 |
| Total_Hits_Timer_prop_Correct_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | 2.9292 | 0.0170 |
| Total_Hits_Timer_prop_Correct_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | 2.6444 | 0.0409 |
| Total_Hits_Timer_prop_Correct_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | 2.2807 | 0.1128 |
| Total_Hits_Timer_prop_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | 1.5368 | 0.6218 |
| Total_Hits_Timer_prop_Correct_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.7874 | 1.0000 |
| Total_Hits_Timer_prop_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | 0.1607 | 1.0000 |
| Total_Hits_Timer_prop_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | 0.8832 | 1.0000 |
| Total_Hits_Timer_prop_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | 0.8711 | 1.0000 |
| Total_Hits_Timer_prop_Correct_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | 0.6384 | 1.0000 |
| New_Problem_AOIs_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | 1.5293 | 0.6309 |
| New_Problem_AOIs_Correct_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | -1.3160 | 0.9409 |
| New_Problem_AOIs_Correct_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.7374 | 1.0000 |
| New_Problem_AOIs_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | 0.3899 | 1.0000 |
| New_Problem_AOIs_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | 1.0344 | 1.0000 |
| New_Problem_AOIs_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | 0.9229 | 1.0000 |
| New_Problem_AOIs_Correct_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | 0.4514 | 1.0000 |
| New_Problem_AOIs_Correct_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | -0.5591 | 1.0000 |
| New_Problem_AOIs_Correct_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | 0.1341 | 1.0000 |
| New_Problem_AOIs_Correct_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | -0.8566 | 1.0000 |
| New_Response_AOIs_Correct_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | -1.7402 | 0.4091 |
| New_Response_AOIs_Correct_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | -1.5514 | 0.6041 |
| New_Response_AOIs_Correct_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.6628 | 1.0000 |
| New_Response_AOIs_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | -0.8185 | 1.0000 |
| New_Response_AOIs_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | -0.0825 | 1.0000 |
| New_Response_AOIs_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | -0.2746 | 1.0000 |
| New_Response_AOIs_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | 0.4454 | 1.0000 |
| New_Response_AOIs_Correct_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | 0.5855 | 1.0000 |
| New_Response_AOIs_Correct_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | -0.9824 | 1.0000 |
| New_Response_AOIs_Correct_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | -0.9661 | 1.0000 |
| Numerator_Denominator_Transitions_Correct_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.6092 | 1.0000 |
| Numerator_Denominator_Transitions_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | -0.2857 | 1.0000 |
| Numerator_Denominator_Transitions_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | 0.3282 | 1.0000 |
| Numerator_Denominator_Transitions_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | 0.4918 | 1.0000 |
| Numerator_Denominator_Transitions_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | 1.0356 | 1.0000 |
| Numerator_Denominator_Transitions_Correct_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | 0.7462 | 1.0000 |
| Numerator_Denominator_Transitions_Correct_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | 0.2835 | 1.0000 |
| Numerator_Denominator_Transitions_Correct_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | 0.7750 | 1.0000 |
| Numerator_Denominator_Transitions_Correct_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | 0.5080 | 1.0000 |
| Numerator_Denominator_Transitions_Correct_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | -0.1088 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_Correct_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | 1.8095 | 0.3519 |
| Total_Hits_Outside_of_Screen_prop_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | -1.7768 | 0.3780 |
| Total_Hits_Outside_of_Screen_prop_Correct_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | 1.4343 | 0.7575 |
| Total_Hits_Outside_of_Screen_prop_Correct_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.6792 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | -0.8970 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_Correct_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | -0.0121 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_Correct_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | 0.6830 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_Correct_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | -0.0551 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_Correct_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | 0.5333 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_Correct_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | -0.0464 | 1.0000 |
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 0.9162, df = 4, p-value = 0.92
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | 0.297281
## | 1.0000
## |
## hMP_mMA. | 0.528091 0.176435
## | 1.0000 1.0000
## |
## hMP_mMA. | 0.161880 -0.167168 -0.391886
## | 1.0000 1.0000 1.0000
## |
## lMP_mMA. | -0.402759 -0.637083 -0.894449 -0.573140
## | 1.0000 1.0000 1.0000 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 0.8057, df = 4, p-value = 0.94
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.124481
## | 1.0000
## |
## hMP_mMA. | -0.353847 -0.188817
## | 1.0000 1.0000
## |
## hMP_mMA. | -0.119175 0.026747 0.252711
## | 1.0000 1.0000 1.0000
## |
## lMP_mMA. | 0.524099 0.569749 0.835242 0.655909
## | 1.0000 1.0000 1.0000 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 2.6595, df = 4, p-value = 0.62
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.162153
## | 1.0000
## |
## hMP_mMA. | 0.798838 0.845035
## | 1.0000 1.0000
## |
## hMP_mMA. | 1.217579 1.190242 0.303986
## | 1.0000 1.0000 1.0000
## |
## lMP_mMA. | 0.030762 0.186463 -0.759119 -1.165020
## | 1.0000 1.0000 1.0000 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 3.9551, df = 4, p-value = 0.41
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.822649
## | 1.0000
## |
## hMP_mMA. | 0.210434 0.959572
## | 1.0000 1.0000
## |
## hMP_mMA. | 1.147573 1.805440 0.842379
## | 1.0000 0.3550 1.0000
## |
## lMP_mMA. | -0.305062 0.552920 -0.490577 -1.441452
## | 1.0000 1.0000 1.0000 0.7473
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 2.5487, df = 4, p-value = 0.64
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.705537
## | 1.0000
## |
## hMP_mMA. | -0.800185 -0.026310
## | 1.0000 1.0000
## |
## hMP_mMA. | 0.193165 0.882659 0.998036
## | 1.0000 1.0000 1.0000
## |
## lMP_mMA. | 0.570532 1.183535 1.318426 0.396672
## | 1.0000 1.0000 0.9368 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 3.709, df = 4, p-value = 0.45
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.861542
## | 1.0000
## |
## hMP_mMA. | -0.407461 0.461209
## | 1.0000 1.0000
## |
## hMP_mMA. | 0.981213 1.705123 1.322158
## | 1.0000 0.4409 0.9306
## |
## lMP_mMA. | -0.339525 0.561980 0.086696 -1.313381
## | 1.0000 1.0000 1.0000 0.9453
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 1.8946, df = 4, p-value = 0.76
##
##
## Comparison of x by group
## (Bonferroni)
## Col Mean-|
## Row Mean | hMP_lMA. hMP_lMA. hMP_mMA. hMP_mMA.
## ---------+--------------------------------------------
## hMP_lMA. | -0.736242
## | 1.0000
## |
## hMP_mMA. | -1.192896 -0.337395
## | 1.0000 1.0000
## |
## hMP_mMA. | -0.265166 0.528253 0.977884
## | 1.0000 1.0000 1.0000
## |
## lMP_mMA. | -0.841407 0.010359 0.395418 -0.604374
## | 1.0000 1.0000 1.0000 1.0000
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
| outcome | comparison | Z | p_adjusted |
|---|---|---|---|
| Total_Problem_AOIs_prop_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | 0.2973 | 1.0000 |
| Total_Problem_AOIs_prop_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | 0.5281 | 1.0000 |
| Total_Problem_AOIs_prop_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | 0.1764 | 1.0000 |
| Total_Problem_AOIs_prop_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | 0.1619 | 1.0000 |
| Total_Problem_AOIs_prop_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | -0.1672 | 1.0000 |
| Total_Problem_AOIs_prop_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | -0.3919 | 1.0000 |
| Total_Problem_AOIs_prop_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | -0.4028 | 1.0000 |
| Total_Problem_AOIs_prop_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | -0.6371 | 1.0000 |
| Total_Problem_AOIs_prop_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | -0.8944 | 1.0000 |
| Total_Problem_AOIs_prop_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | -0.5731 | 1.0000 |
| Total_Response_AOIs_prop_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.1245 | 1.0000 |
| Total_Response_AOIs_prop_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | -0.3538 | 1.0000 |
| Total_Response_AOIs_prop_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | -0.1888 | 1.0000 |
| Total_Response_AOIs_prop_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | -0.1192 | 1.0000 |
| Total_Response_AOIs_prop_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | 0.0267 | 1.0000 |
| Total_Response_AOIs_prop_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | 0.2527 | 1.0000 |
| Total_Response_AOIs_prop_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | 0.5241 | 1.0000 |
| Total_Response_AOIs_prop_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | 0.5697 | 1.0000 |
| Total_Response_AOIs_prop_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | 0.8352 | 1.0000 |
| Total_Response_AOIs_prop_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | 0.6559 | 1.0000 |
| Total_Hits_Timer_prop_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.1622 | 1.0000 |
| Total_Hits_Timer_prop_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | 0.7988 | 1.0000 |
| Total_Hits_Timer_prop_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | 0.8450 | 1.0000 |
| Total_Hits_Timer_prop_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | 1.2176 | 1.0000 |
| Total_Hits_Timer_prop_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | 1.1902 | 1.0000 |
| Total_Hits_Timer_prop_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | 0.3040 | 1.0000 |
| Total_Hits_Timer_prop_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | 0.0308 | 1.0000 |
| Total_Hits_Timer_prop_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | 0.1865 | 1.0000 |
| Total_Hits_Timer_prop_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | -0.7591 | 1.0000 |
| Total_Hits_Timer_prop_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | -1.1650 | 1.0000 |
| New_Problem_AOIs_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | 1.8054 | 0.3550 |
| New_Problem_AOIs_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | -1.4415 | 0.7473 |
| New_Problem_AOIs_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.8226 | 1.0000 |
| New_Problem_AOIs_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | 0.2104 | 1.0000 |
| New_Problem_AOIs_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | 0.9596 | 1.0000 |
| New_Problem_AOIs_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | 1.1476 | 1.0000 |
| New_Problem_AOIs_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | 0.8424 | 1.0000 |
| New_Problem_AOIs_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | -0.3051 | 1.0000 |
| New_Problem_AOIs_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | 0.5529 | 1.0000 |
| New_Problem_AOIs_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | -0.4906 | 1.0000 |
| New_Response_AOIs_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | 1.3184 | 0.9368 |
| New_Response_AOIs_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.7055 | 1.0000 |
| New_Response_AOIs_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | -0.8002 | 1.0000 |
| New_Response_AOIs_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | -0.0263 | 1.0000 |
| New_Response_AOIs_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | 0.1932 | 1.0000 |
| New_Response_AOIs_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | 0.8827 | 1.0000 |
| New_Response_AOIs_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | 0.9980 | 1.0000 |
| New_Response_AOIs_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | 0.5705 | 1.0000 |
| New_Response_AOIs_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | 1.1835 | 1.0000 |
| New_Response_AOIs_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | 0.3967 | 1.0000 |
| Numerator_Denominator_Transitions_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | 1.7051 | 0.4409 |
| Numerator_Denominator_Transitions_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | 1.3222 | 0.9306 |
| Numerator_Denominator_Transitions_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | -1.3134 | 0.9453 |
| Numerator_Denominator_Transitions_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.8615 | 1.0000 |
| Numerator_Denominator_Transitions_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | -0.4075 | 1.0000 |
| Numerator_Denominator_Transitions_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | 0.4612 | 1.0000 |
| Numerator_Denominator_Transitions_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | 0.9812 | 1.0000 |
| Numerator_Denominator_Transitions_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | -0.3395 | 1.0000 |
| Numerator_Denominator_Transitions_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | 0.5620 | 1.0000 |
| Numerator_Denominator_Transitions_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | 0.0867 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_dif | hMP_lMA.Congruent - hMP_lMA.Incongruent | -0.7362 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_dif | hMP_lMA.Congruent - hMP_mMA.Congruent | -1.1929 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_dif | hMP_lMA.Incongruent - hMP_mMA.Congruent | -0.3374 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_dif | hMP_lMA.Congruent - hMP_mMA.Incongruent | -0.2652 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_dif | hMP_lMA.Incongruent - hMP_mMA.Incongruent | 0.5283 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_dif | hMP_mMA.Congruent - hMP_mMA.Incongruent | 0.9779 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_dif | hMP_lMA.Congruent - lMP_mMA.Congruent | -0.8414 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_dif | hMP_lMA.Incongruent - lMP_mMA.Congruent | 0.0104 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_dif | hMP_mMA.Congruent - lMP_mMA.Congruent | 0.3954 | 1.0000 |
| Total_Hits_Outside_of_Screen_prop_dif | hMP_mMA.Incongruent - lMP_mMA.Congruent | -0.6044 | 1.0000 |
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 14 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning: Removed 14 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Removed 14 rows containing missing values or values outside the scale range
## (`geom_point()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 14 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Removed 14 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Removed 14 rows containing missing values or values outside the scale range
## (`geom_point()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 14 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Removed 14 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Removed 14 rows containing missing values or values outside the scale range
## (`geom_point()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 14 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Removed 14 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Removed 14 rows containing missing values or values outside the scale range
## (`geom_point()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 14 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Removed 14 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Removed 14 rows containing missing values or values outside the scale range
## (`geom_point()`).
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 14 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Removed 14 rows containing missing values or values outside the scale range
## (`geom_point()`).
## Removed 14 rows containing missing values or values outside the scale range
## (`geom_point()`).
# Fit one robust linear model per outcome variable and collect the
# coefficient tables into a single summary data frame.
# Results are accumulated in a preallocated list and combined once at
# the end (growing a data.frame with rbind() in a loop copies it on
# every iteration).
model_summary_list <- vector("list", length(outcome_vars))
for (i in seq_along(outcome_vars)) {
  dv <- outcome_vars[i]
  # Fit model: math anxiety x pretest performance x condition
  # (model_formula, not `formula`, to avoid shadowing stats::formula)
  model_formula <- as.formula(
    paste0(dv, " ~ scale(MA) * scale(Pre_MP) * Condition")
  )
  model <- lm_robust(
    model_formula,
    data = AOI_student_data_exp
  )
  # Get model summary
  sum_model <- summary(model)
  fixed_effects <- sum_model$coefficients
  # Get standardized estimates
  std_params <- standardize_parameters(model)
  # Coefficient table for this DV; p_value kept numeric here so the
  # significance flag below is computed on the raw value
  model_summary_list[[i]] <- data.frame(
    DV = dv,
    Predictor = rownames(fixed_effects),
    Estimate = round(fixed_effects[, "Estimate"], 3),
    SE = round(fixed_effects[, "Std. Error"], 3),
    z_value = round(fixed_effects[, "t value"], 3),
    p_value = fixed_effects[, "Pr(>|t|)"],
    Std_Estimate = round(std_params$Std_Coefficient, 3)
  )
}
model_summaries <- bind_rows(model_summary_list)
# Format results. Flag significance on the NUMERIC p-value BEFORE
# formatting: format.pval() returns strings (possibly "<2e-16") that
# as.numeric() would coerce to NA, silently dropping the flag.
model_summaries <- model_summaries %>%
  mutate(
    Significant = ifelse(p_value < 0.05, "*", ""),
    p_value = format.pval(p_value, digits = 3)
  )
# Display formatted table
kable(model_summaries,
      caption = "Summary of Robust Linear Models",
      digits = 3) %>%
  kable_styling(bootstrap_options = c("striped", "hover", "condensed"))
| | DV | Predictor | Estimate | SE | z_value | p_value | Std_Estimate | Significant |
|---|---|---|---|---|---|---|---|---|
| (Intercept) | Total_Problem_AOIs_prop_Correct_dif | (Intercept) | -0.014 | 0.007 | -2.038 | 0.046281 | -0.112 | * |
| scale(MA) | Total_Problem_AOIs_prop_Correct_dif | scale(MA) | -0.009 | 0.011 | -0.817 | 0.417330 | -0.300 | |
| scale(Pre_MP) | Total_Problem_AOIs_prop_Correct_dif | scale(Pre_MP) | -0.001 | 0.008 | -0.169 | 0.866466 | -0.012 | |
| ConditionIncongruent | Total_Problem_AOIs_prop_Correct_dif | ConditionIncongruent | 0.004 | 0.009 | 0.460 | 0.647081 | 0.126 | |
| scale(MA):scale(Pre_MP) | Total_Problem_AOIs_prop_Correct_dif | scale(MA):scale(Pre_MP) | -0.004 | 0.012 | -0.339 | 0.735803 | -0.079 | |
| scale(MA):ConditionIncongruent | Total_Problem_AOIs_prop_Correct_dif | scale(MA):ConditionIncongruent | 0.025 | 0.013 | 1.875 | 0.066002 | 0.430 | |
| scale(Pre_MP):ConditionIncongruent | Total_Problem_AOIs_prop_Correct_dif | scale(Pre_MP):ConditionIncongruent | 0.008 | 0.013 | 0.597 | 0.552829 | 0.247 | |
| scale(MA):scale(Pre_MP):ConditionIncongruent | Total_Problem_AOIs_prop_Correct_dif | scale(MA):scale(Pre_MP):ConditionIncongruent | -0.027 | 0.020 | -1.352 | 0.181916 | -0.509 | |
| (Intercept)1 | Total_Response_AOIs_prop_Correct_dif | (Intercept) | 0.004 | 0.002 | 1.757 | 0.084404 | 0.034 | |
| scale(MA)1 | Total_Response_AOIs_prop_Correct_dif | scale(MA) | 0.000 | 0.004 | 0.101 | 0.919885 | 0.051 | |
| scale(Pre_MP)1 | Total_Response_AOIs_prop_Correct_dif | scale(Pre_MP) | -0.006 | 0.003 | -1.809 | 0.075864 | -0.332 | |
| ConditionIncongruent1 | Total_Response_AOIs_prop_Correct_dif | ConditionIncongruent | -0.002 | 0.003 | -0.692 | 0.491695 | -0.144 | |
| scale(MA):scale(Pre_MP)1 | Total_Response_AOIs_prop_Correct_dif | scale(MA):scale(Pre_MP) | 0.001 | 0.005 | 0.106 | 0.916029 | 0.027 | |
| scale(MA):ConditionIncongruent1 | Total_Response_AOIs_prop_Correct_dif | scale(MA):ConditionIncongruent | 0.009 | 0.005 | 1.794 | 0.078133 | 0.378 | |
| scale(Pre_MP):ConditionIncongruent1 | Total_Response_AOIs_prop_Correct_dif | scale(Pre_MP):ConditionIncongruent | 0.004 | 0.005 | 0.847 | 0.400630 | 0.335 | |
| scale(MA):scale(Pre_MP):ConditionIncongruent1 | Total_Response_AOIs_prop_Correct_dif | scale(MA):scale(Pre_MP):ConditionIncongruent | -0.011 | 0.007 | -1.693 | 0.096048 | -0.602 | |
| (Intercept)2 | Total_Hits_Timer_prop_Correct_dif | (Intercept) | -0.001 | 0.001 | -1.589 | 0.117601 | 0.159 | |
| scale(MA)2 | Total_Hits_Timer_prop_Correct_dif | scale(MA) | 0.000 | 0.001 | 0.013 | 0.989478 | 0.140 | |
| scale(Pre_MP)2 | Total_Hits_Timer_prop_Correct_dif | scale(Pre_MP) | 0.002 | 0.001 | 2.054 | 0.044689 | 0.378 | * |
| ConditionIncongruent2 | Total_Hits_Timer_prop_Correct_dif | ConditionIncongruent | 0.000 | 0.001 | 0.156 | 0.876907 | -0.090 | |
| scale(MA):scale(Pre_MP)2 | Total_Hits_Timer_prop_Correct_dif | scale(MA):scale(Pre_MP) | 0.001 | 0.001 | 1.248 | 0.217081 | 0.242 | |
| scale(MA):ConditionIncongruent2 | Total_Hits_Timer_prop_Correct_dif | scale(MA):ConditionIncongruent | -0.002 | 0.002 | -0.963 | 0.339848 | -0.493 | |
| scale(Pre_MP):ConditionIncongruent2 | Total_Hits_Timer_prop_Correct_dif | scale(Pre_MP):ConditionIncongruent | -0.002 | 0.002 | -1.505 | 0.137912 | -0.387 | |
| scale(MA):scale(Pre_MP):ConditionIncongruent2 | Total_Hits_Timer_prop_Correct_dif | scale(MA):scale(Pre_MP):ConditionIncongruent | 0.000 | 0.003 | -0.176 | 0.860626 | -0.077 | |
| (Intercept)3 | New_Problem_AOIs_Correct_dif | (Intercept) | -20.220 | 8.153 | -2.480 | 0.016170 | -0.054 | * |
| scale(MA)3 | New_Problem_AOIs_Correct_dif | scale(MA) | 9.475 | 10.738 | 0.882 | 0.381329 | 0.121 | |
| scale(Pre_MP)3 | New_Problem_AOIs_Correct_dif | scale(Pre_MP) | -0.097 | 8.406 | -0.012 | 0.990824 | 0.038 | |
| ConditionIncongruent3 | New_Problem_AOIs_Correct_dif | ConditionIncongruent | -3.047 | 9.988 | -0.305 | 0.761473 | -0.041 | |
| scale(MA):scale(Pre_MP)3 | New_Problem_AOIs_Correct_dif | scale(MA):scale(Pre_MP) | -12.697 | 6.896 | -1.841 | 0.070913 | -0.212 | |
| scale(MA):ConditionIncongruent3 | New_Problem_AOIs_Correct_dif | scale(MA):ConditionIncongruent | -15.095 | 16.828 | -0.897 | 0.373542 | -0.322 | |
| scale(Pre_MP):ConditionIncongruent3 | New_Problem_AOIs_Correct_dif | scale(Pre_MP):ConditionIncongruent | -1.734 | 14.060 | -0.123 | 0.902309 | -0.051 | |
| scale(MA):scale(Pre_MP):ConditionIncongruent3 | New_Problem_AOIs_Correct_dif | scale(MA):scale(Pre_MP):ConditionIncongruent | 6.478 | 21.891 | 0.296 | 0.768390 | 0.108 | |
| (Intercept)4 | New_Response_AOIs_Correct_dif | (Intercept) | -1.397 | 2.683 | -0.521 | 0.604650 | -0.111 | |
| scale(MA)4 | New_Response_AOIs_Correct_dif | scale(MA) | 4.395 | 4.190 | 1.049 | 0.298768 | 0.292 | |
| scale(Pre_MP)4 | New_Response_AOIs_Correct_dif | scale(Pre_MP) | -0.390 | 3.029 | -0.129 | 0.898099 | 0.019 | |
| ConditionIncongruent4 | New_Response_AOIs_Correct_dif | ConditionIncongruent | -1.219 | 3.080 | -0.396 | 0.693751 | -0.009 | |
| scale(MA):scale(Pre_MP)4 | New_Response_AOIs_Correct_dif | scale(MA):scale(Pre_MP) | -3.847 | 4.373 | -0.880 | 0.382751 | -0.250 | |
| scale(MA):ConditionIncongruent4 | New_Response_AOIs_Correct_dif | scale(MA):ConditionIncongruent | -0.055 | 4.946 | -0.011 | 0.991121 | -0.127 | |
| scale(Pre_MP):ConditionIncongruent4 | New_Response_AOIs_Correct_dif | scale(Pre_MP):ConditionIncongruent | 2.450 | 3.704 | 0.662 | 0.510924 | 0.212 | |
| scale(MA):scale(Pre_MP):ConditionIncongruent4 | New_Response_AOIs_Correct_dif | scale(MA):scale(Pre_MP):ConditionIncongruent | -3.311 | 6.237 | -0.531 | 0.597668 | -0.215 | |
| (Intercept)5 | Numerator_Denominator_Transitions_Correct_dif | (Intercept) | -5.998 | 2.096 | -2.862 | 0.005904 | -0.003 | * |
| scale(MA)5 | Numerator_Denominator_Transitions_Correct_dif | scale(MA) | 2.521 | 2.665 | 0.946 | 0.348210 | 0.187 | |
| scale(Pre_MP)5 | Numerator_Denominator_Transitions_Correct_dif | scale(Pre_MP) | 1.994 | 2.766 | 0.721 | 0.474002 | 0.149 | |
| ConditionIncongruent5 | Numerator_Denominator_Transitions_Correct_dif | ConditionIncongruent | -0.900 | 2.562 | -0.351 | 0.726674 | -0.051 | |
| scale(MA):scale(Pre_MP)5 | Numerator_Denominator_Transitions_Correct_dif | scale(MA):scale(Pre_MP) | -1.411 | 1.772 | -0.796 | 0.429123 | -0.087 | |
| scale(MA):ConditionIncongruent5 | Numerator_Denominator_Transitions_Correct_dif | scale(MA):ConditionIncongruent | -5.004 | 4.101 | -1.220 | 0.227479 | -0.421 | |
| scale(Pre_MP):ConditionIncongruent5 | Numerator_Denominator_Transitions_Correct_dif | scale(Pre_MP):ConditionIncongruent | -1.023 | 3.701 | -0.276 | 0.783233 | -0.083 | |
| scale(MA):scale(Pre_MP):ConditionIncongruent5 | Numerator_Denominator_Transitions_Correct_dif | scale(MA):scale(Pre_MP):ConditionIncongruent | 1.354 | 5.022 | 0.270 | 0.788516 | 0.083 | |
| (Intercept)6 | Total_Hits_Outside_of_Screen_prop_Correct_dif | (Intercept) | 0.011 | 0.022 | 0.500 | 0.618880 | 0.109 | |
| scale(MA)6 | Total_Hits_Outside_of_Screen_prop_Correct_dif | scale(MA) | 0.035 | 0.017 | 2.109 | 0.039434 | 0.461 | * |
| scale(Pre_MP)6 | Total_Hits_Outside_of_Screen_prop_Correct_dif | scale(Pre_MP) | 0.041 | 0.029 | 1.419 | 0.161301 | 0.319 | |
| ConditionIncongruent6 | Total_Hits_Outside_of_Screen_prop_Correct_dif | ConditionIncongruent | 0.002 | 0.027 | 0.074 | 0.941134 | -0.045 | |
| scale(MA):scale(Pre_MP)6 | Total_Hits_Outside_of_Screen_prop_Correct_dif | scale(MA):scale(Pre_MP) | 0.012 | 0.028 | 0.439 | 0.662082 | 0.093 | |
| scale(MA):ConditionIncongruent6 | Total_Hits_Outside_of_Screen_prop_Correct_dif | scale(MA):ConditionIncongruent | -0.104 | 0.028 | -3.717 | 0.000468 | -0.801 | * |
| scale(Pre_MP):ConditionIncongruent6 | Total_Hits_Outside_of_Screen_prop_Correct_dif | scale(Pre_MP):ConditionIncongruent | -0.047 | 0.037 | -1.283 | 0.204677 | -0.523 | |
| scale(MA):scale(Pre_MP):ConditionIncongruent6 | Total_Hits_Outside_of_Screen_prop_Correct_dif | scale(MA):scale(Pre_MP):ConditionIncongruent | 0.094 | 0.041 | 2.270 | 0.027090 | 0.718 | * |